Searched refs:tbl (Results 1 - 25 of 313), sorted by last modified time

/linux-master/drivers/gpu/drm/amd/display/dc/bios/
bios_parser2.c 166 struct object_info_table *tbl = &bp->object_info_tbl; local
167 struct display_object_info_table_v1_4 *v1_4 = tbl->v1_4;
169 struct display_object_info_table_v1_5 *v1_5 = tbl->v1_5;
208 struct object_info_table *tbl = &bp->object_info_tbl; local
225 for (i = 0; i < tbl->v1_4->number_of_path; i++) {
227 tbl->v1_4->display_path[i].encoderobjid);
241 for (i = 0; i < tbl->v1_5->number_of_path; i++) {
243 tbl->v1_5->display_path[i].encoderobjid);
261 for (i = 0; i < tbl->v1_4->number_of_path; i++) {
263 tbl
3119 struct object_info_table *tbl; local
3234 struct object_info_table *tbl; local
3331 struct object_info_table *tbl; local
[all...]
/linux-master/drivers/power/supply/
rt9455_charger.c 251 * Iterate through each element of the 'tbl' array until an element whose value
255 static unsigned int rt9455_find_idx(const int tbl[], int tbl_size, int v) argument
266 if (v <= tbl[i])
274 const int tbl[], int tbl_size, int *val)
284 *val = tbl[v];
291 const int tbl[], int tbl_size, int val)
293 unsigned int idx = rt9455_find_idx(tbl, tbl_size, val);
272 rt9455_get_field_val(struct rt9455_info *info, enum rt9455_fields field, const int tbl[], int tbl_size, int *val) argument
289 rt9455_set_field_val(struct rt9455_info *info, enum rt9455_fields field, const int tbl[], int tbl_size, int val) argument
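
The rt9455_charger.c comment above describes a common lookup idiom: walk a sorted table of supported register values and return the index of the first entry that can satisfy the requested value. A minimal sketch of that idiom follows, assuming a generic find_idx() helper and a last-entry fallback; it is not the driver's exact code.

/* Return the index of the first tbl[] entry >= v; falling back to the
 * last index when v exceeds every entry is an assumption of this sketch. */
static unsigned int find_idx(const int tbl[], int tbl_size, int v)
{
	int i;

	for (i = 0; i < tbl_size; i++)
		if (v <= tbl[i])
			return i;

	return tbl_size - 1;
}
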
/linux-master/net/mac80211/
mesh_pathtbl.c 20 static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
82 struct mesh_table *tbl = tblptr; local
84 mesh_path_free_rcu(tbl, mpath);
87 static void mesh_table_init(struct mesh_table *tbl) argument
89 INIT_HLIST_HEAD(&tbl->known_gates);
90 INIT_HLIST_HEAD(&tbl->walk_head);
91 atomic_set(&tbl->entries, 0);
92 spin_lock_init(&tbl->gates_lock);
93 spin_lock_init(&tbl->walk_lock);
98 WARN_ON(rhashtable_init(&tbl
101 mesh_table_free(struct mesh_table *tbl) argument
243 mpath_lookup(struct mesh_table *tbl, const u8 *dst, struct ieee80211_sub_if_data *sdata) argument
280 __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) argument
339 struct mesh_table *tbl; local
374 mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) argument
683 struct mesh_table *tbl; local
725 struct mesh_table *tbl; local
773 struct mesh_table *tbl = &sdata->u.mesh.mesh_paths; local
795 mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath) argument
811 __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) argument
836 struct mesh_table *tbl = &sdata->u.mesh.mesh_paths; local
851 struct mesh_table *tbl = &sdata->u.mesh.mpp_paths; local
863 table_flush_by_iface(struct mesh_table *tbl) argument
897 table_path_del(struct mesh_table *tbl, struct ieee80211_sub_if_data *sdata, const u8 *addr) argument
966 struct mesh_table *tbl; local
1060 mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl) argument
[all...]
/linux-master/net/ipv4/
route.c 3552 struct ctl_table *tbl; local
3555 tbl = ipv4_route_netns_table;
3559 tbl = kmemdup(tbl, sizeof(ipv4_route_netns_table), GFP_KERNEL);
3560 if (!tbl)
3565 if (tbl[0].procname != ipv4_route_flush_procname) {
3566 tbl[0].procname = NULL;
3575 tbl[i].data += (void *)net - (void *)&init_net;
3577 tbl[0].extra1 = net;
3580 tbl, table_siz
3594 struct ctl_table *tbl; local
[all...]
/linux-master/arch/arc/kernel/
setup.c 178 const struct id_to_str *tbl; local
183 for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {
184 if (uarch.maj == tbl->id) {
185 release = tbl->str;
/linux-master/fs/bcachefs/
btree_io.c 2309 struct bucket_table *tbl; local
2316 for_each_cached_btree(b, c, tbl, i, pos)
btree_key_cache.c 298 struct bucket_table *tbl; local
305 tbl = rht_dereference_rcu(c->table.tbl, &c->table);
306 for (i = 0; i < tbl->size; i++)
307 rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
831 struct bucket_table *tbl; local
872 tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
873 if (bc->shrink_iter >= tbl->size)
880 pos = rht_ptr_rcu(rht_bucket(tbl, b
930 struct bucket_table *tbl; local
[all...]
debug.c 557 struct bucket_table *tbl;
567 tbl = rht_dereference_rcu(c->btree_cache.table.tbl,
569 if (i->iter < tbl->size) {
570 rht_for_each_entry_rcu(b, pos, tbl, i->iter, hash)
/linux-master/drivers/misc/cardreader/
rtsx_pcr.c 624 static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl) argument
628 while (*tbl & 0xFFFF0000) {
630 (u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
631 tbl++;
639 const u32 *tbl; local
642 tbl = pcr->sd_pull_ctl_enable_tbl;
644 tbl = pcr->ms_pull_ctl_enable_tbl;
648 return rtsx_pci_set_pull_ctl(pcr, tbl);
654 const u32 *tbl; local
[all...]
/linux-master/arch/powerpc/kernel/
iommu.c 45 struct iommu_table *tbl = data; local
46 *val = bitmap_weight(tbl->it_map, tbl->it_size);
51 static void iommu_debugfs_add(struct iommu_table *tbl) argument
56 sprintf(name, "%08lx", tbl->it_index);
59 debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
60 debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
61 debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
62 debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
63 debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl
68 iommu_debugfs_del(struct iommu_table *tbl) argument
76 iommu_debugfs_add(struct iommu_table *tbl) argument
77 iommu_debugfs_del(struct iommu_table *tbl) argument
213 iommu_range_alloc(struct device *dev, struct iommu_table *tbl, unsigned long npages, unsigned long *handle, unsigned long mask, unsigned int align_order) argument
345 iommu_alloc(struct device *dev, struct iommu_table *tbl, void *page, unsigned int npages, enum dma_data_direction direction, unsigned long mask, unsigned int align_order, unsigned long attrs) argument
388 iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned int npages) argument
416 get_pool(struct iommu_table *tbl, unsigned long entry) argument
435 __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned int npages) argument
457 iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned int npages) argument
470 ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, struct scatterlist *sglist, int nelems, unsigned long mask, enum dma_data_direction direction, unsigned long attrs) argument
613 ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, unsigned long attrs) argument
645 iommu_table_clear(struct iommu_table *tbl) argument
686 iommu_table_reserve_pages(struct iommu_table *tbl, unsigned long res_start, unsigned long res_end) argument
724 iommu_init_table(struct iommu_table *tbl, int nid, unsigned long res_start, unsigned long res_end) argument
781 iommu_table_in_use(struct iommu_table *tbl) argument
804 struct iommu_table *tbl; local
829 iommu_tce_table_get(struct iommu_table *tbl) argument
838 iommu_tce_table_put(struct iommu_table *tbl) argument
852 iommu_map_page(struct device *dev, struct iommu_table *tbl, struct page *page, unsigned long offset, size_t size, unsigned long mask, enum dma_data_direction direction, unsigned long attrs) argument
891 iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction, unsigned long attrs) argument
910 iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, size_t size, dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node) argument
960 iommu_free_coherent(struct iommu_table *tbl, size_t size, void *vaddr, dma_addr_t dma_handle) argument
1035 iommu_flush_tce(struct iommu_table *tbl) argument
1077 iommu_tce_xchg_no_kill(struct mm_struct *mm, struct iommu_table *tbl, unsigned long entry, unsigned long *hpa, enum dma_data_direction *direction) argument
1096 iommu_tce_kill(struct iommu_table *tbl, unsigned long entry, unsigned long pages) argument
1105 iommu_take_ownership(struct iommu_table *tbl) argument
1138 iommu_release_ownership(struct iommu_table *tbl) argument
1209 struct iommu_table *tbl = table_group->tables[0]; local
1223 spapr_tce_set_window(struct iommu_table_group *table_group, int num, struct iommu_table *tbl) argument
1239 struct iommu_table *tbl = table_group->tables[i]; local
1260 struct iommu_table *tbl = table_group->tables[i]; local
[all...]
/linux-master/drivers/iommu/intel/
iommu.c 2450 struct context_entry **tbl,
2469 tbl[tbl_idx] = new_ce;
2521 tbl[tbl_idx + pos] = new_ce;
2448 copy_context_table(struct intel_iommu *iommu, struct root_entry *old_re, struct context_entry **tbl, int bus, bool ext) argument
/linux-master/drivers/iommu/amd/
iommu.c 1720 static void free_gcr3_tbl_level1(u64 *tbl) argument
1726 if (!(tbl[i] & GCR3_VALID))
1729 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
1735 static void free_gcr3_tbl_level2(u64 *tbl) argument
1741 if (!(tbl[i] & GCR3_VALID))
1744 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
/linux-master/net/ipv6/
ip6_fib.c 2577 lockdep_is_held(&iter->tbl->tb6_lock));
2591 iter->w.root = &iter->tbl->tb6_root;
2600 static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl, argument
2606 if (tbl) {
2607 h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;
2608 node = rcu_dereference(hlist_next_rcu(&tbl->tb6_hlist));
2651 spin_lock_bh(&iter->tbl->tb6_lock);
2653 spin_unlock_bh(&iter->tbl->tb6_lock);
2662 iter->tbl = ipv6_route_seq_next_table(iter->tbl, ne
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_dbg.c 158 void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl) argument
160 mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
161 list_add_tail(&tbl->dbg_node, &tbl->dmn->dbg_tbl_list);
162 mutex_unlock(&tbl->dmn->dump_info.dbg_mutex);
165 void mlx5dr_dbg_tbl_del(struct mlx5dr_table *tbl) argument
167 mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
168 list_del(&tbl->dbg_node);
169 mutex_unlock(&tbl->dmn->dump_info.dbg_mutex);
174 struct mlx5dr_domain *dmn = rule->matcher->tbl
865 dr_dump_table_all(struct seq_file *file, struct mlx5dr_table *tbl) argument
1050 struct mlx5dr_table *tbl; local
[all...]
/linux-master/drivers/cxl/core/
cdat.c 427 } *tbl = (struct acpi_cdat_sslbis_table *)header; local
428 int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header);
437 if (!remain || remain % sizeof(tbl->entries[0]) ||
443 sslbis = &tbl->sslbis_header;
448 entries = remain / sizeof(tbl->entries[0]);
449 if (struct_size(tbl, entries, entries) != len)
453 u16 x = le16_to_cpu((__force __le16)tbl->entries[i].portx_id);
454 u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id);
484 le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
485 le_val = (__force __le16)tbl
[all...]
/linux-master/fs/nfsd/
nfs4state.c 3223 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions) argument
3228 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
3242 struct list_head *tbl = nn->conf_id_hashtbl; local
3245 return find_client_in_id_table(tbl, clid, sessions);
3251 struct list_head *tbl = nn->unconf_id_hashtbl; local
3254 return find_client_in_id_table(tbl, clid, sessions);
/linux-master/drivers/scsi/
myrs.c 1338 struct myrs_cpu_type_tbl *tbl; local
1346 tbl = myrs_cpu_type_names;
1348 if (tbl[i].type == info->cpu[0].cpu_type) {
1349 first_processor = tbl[i].name;
1355 tbl = myrs_cpu_type_names;
1357 if (tbl[i].type == info->cpu[1].cpu_type) {
1358 second_processor = tbl[i].name;
/linux-master/drivers/net/ethernet/intel/ixgbe/
ixgbe_ipsec.c 44 * @tbl: table selector
50 enum ixgbe_ipsec_tbl_sel tbl)
56 reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
49 ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx, enum ixgbe_ipsec_tbl_sel tbl) argument
/linux-master/drivers/firmware/efi/
H A Defi.c780 efi_rt_properties_table_t *tbl; local
782 tbl = early_memremap(rt_prop, sizeof(*tbl));
783 if (tbl) {
784 efi.runtime_supported_mask &= tbl->runtime_services_supported;
785 early_memunmap(tbl, sizeof(*tbl));
791 struct linux_efi_initrd *tbl; local
793 tbl = early_memremap(initrd, sizeof(*tbl));
[all...]
/linux-master/include/linux/soc/qcom/
geni-se.h 497 int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl);
/linux-master/drivers/net/ethernet/intel/ice/
ice_ddp.c 494 hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
495 hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
523 hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
524 hw->tnl.tbl[hw->tnl.count].valid = false;
525 hw->tnl.tbl[hw->tnl.count].boost_addr = val;
526 hw->tnl.tbl[hw->tnl.count].port = 0;
2081 ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
2082 &hw->tnl.tbl[i].boost_entry);
2083 if (hw->tnl.tbl[i].boost_entry) {
2084 hw->tnl.tbl[
[all...]
/linux-master/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_main.c 14836 struct bdn_fc_npiv_tbl *tbl = NULL; local
14846 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
14847 if (!tbl) {
14860 if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
14868 entries = tbl->fc_npiv_cfg.num_of_npiv;
14870 tbl->fc_npiv_cfg.num_of_npiv = entries;
14872 if (!tbl->fc_npiv_cfg.num_of_npiv) {
14876 } else if (tbl
[all...]
/linux-master/drivers/mtd/ubi/
ubi.h 877 void ubi_eba_destroy_table(struct ubi_eba_table *tbl);
880 void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl);
eba.c 123 struct ubi_eba_table *tbl; local
127 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
128 if (!tbl)
131 tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
133 if (!tbl->entries)
137 tbl->entries[i].pnum = UBI_LEB_UNMAPPED;
139 return tbl;
142 kfree(tbl);
153 ubi_eba_destroy_table(struct ubi_eba_table *tbl) argument
191 ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl) argument
1641 struct ubi_eba_table *tbl; local
[all...]
/linux-master/drivers/iio/adc/
ad4130.c 1364 static int _ad4130_find_table_index(const unsigned int *tbl, size_t len, argument
1370 if (tbl[i] == val)

Completed in 434 milliseconds
