Searched refs:tbl (Results 51 - 75 of 313) sorted by relevance


/linux-master/arch/powerpc/platforms/pseries/
iommu.c 59 struct iommu_table *tbl; local
61 tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);
62 if (!tbl)
65 INIT_LIST_HEAD_RCU(&tbl->it_group_list);
66 kref_init(&tbl->it_kref);
67 return tbl;
116 static int tce_build_pSeries(struct iommu_table *tbl, long index, argument
124 const unsigned long tceshift = tbl->it_page_shift;
125 const unsigned long pagesize = IOMMU_PAGE_SIZE(tbl);
132 tcep = ((__be64 *)tbl
146 tce_free_pSeries(struct iommu_table *tbl, long index, long npages) argument
156 tce_get_pseries(struct iommu_table *tbl, long index) argument
211 tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages, unsigned long uaddr, enum dma_data_direction direction, unsigned long attrs) argument
317 tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages) argument
346 tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum) argument
524 iommu_table_setparms_common(struct iommu_table *tbl, unsigned long busno, unsigned long liobn, unsigned long win_addr, unsigned long window_size, unsigned long page_shift, void *base, struct iommu_table_ops *table_ops) argument
542 iommu_table_setparms(struct pci_controller *phb, struct device_node *dn, struct iommu_table *tbl) argument
586 struct iommu_table *tbl; local
664 tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned long *tce, enum dma_data_direction *direction) argument
773 struct iommu_table *tbl; local
827 struct iommu_table *tbl; local
1568 struct iommu_table *tbl; local
[all...]
vio.c 521 struct iommu_table *tbl = get_iommu_table_base(dev); local
524 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
526 ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),
533 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
545 struct iommu_table *tbl = get_iommu_table_base(dev); local
547 iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
548 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
556 struct iommu_table *tbl = get_iommu_table_base(dev); local
562 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
567 ret = ppc_iommu_map_sg(dev, tbl, sglis
591 struct iommu_table *tbl = get_iommu_table_base(dev); local
705 struct iommu_table *tbl; local
1157 struct iommu_table *tbl; local
1318 struct iommu_table *tbl = get_iommu_table_base(dev); local
[all...]
/linux-master/drivers/media/usb/gspca/gl860/
gl860.h 79 int fetch_validx(struct gspca_dev *gspca_dev, struct validx *tbl, int len);
80 int keep_on_fetching_validx(struct gspca_dev *gspca_dev, struct validx *tbl,
82 void fetch_idxdata(struct gspca_dev *gspca_dev, struct idxdata *tbl, int len);
gl860.c 580 int fetch_validx(struct gspca_dev *gspca_dev, struct validx *tbl, int len) argument
585 if (tbl[n].idx != 0xffff)
586 ctrl_out(gspca_dev, 0x40, 1, tbl[n].val,
587 tbl[n].idx, 0, NULL);
588 else if (tbl[n].val == 0xffff)
591 msleep(tbl[n].val);
596 int keep_on_fetching_validx(struct gspca_dev *gspca_dev, struct validx *tbl, argument
600 if (tbl[n].idx != 0xffff)
601 ctrl_out(gspca_dev, 0x40, 1, tbl[n].val, tbl[
611 fetch_idxdata(struct gspca_dev *gspca_dev, struct idxdata *tbl, int len) argument
[all...]
/linux-master/arch/mips/kernel/syscalls/
Makefile 22 $(uapi)/unistd_%.h: $(src)/syscall_%.tbl $(syshdr) FORCE
29 $(kapi)/unistd_nr_%.h: $(src)/syscall_%.tbl $(sysnr) FORCE
32 $(kapi)/syscall_table_%.h: $(src)/syscall_%.tbl $(systbl) FORCE
/linux-master/include/trace/events/
neigh.h 25 TP_PROTO(struct neigh_table *tbl, struct net_device *dev,
29 TP_ARGS(tbl, dev, pkey, n, exempt_from_gc),
44 __entry->family = tbl->family;
46 __entry->entries = atomic_read(&tbl->gc_entries);
51 if (tbl->family == AF_INET)
57 if (tbl->family == AF_INET6) {
105 __entry->family = n->tbl->family;
117 if (n->tbl->family == AF_INET)
123 if (n->tbl->family == AF_INET6) {
182 __entry->family = n->tbl
[all...]
/linux-master/drivers/infiniband/hw/bnxt_re/
qplib_res.c 422 struct bnxt_qplib_hwq *tbl; local
433 tbl = &ctx->qtbl[i];
434 if (!tbl->max_elements)
438 switch (tbl->level) {
440 pg_count = tbl->pbl[PBL_LVL_1].pg_count;
443 dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
451 *ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
488 * Depending on the size of the tbl requested, either a 1 Page Buffer List
570 kfree(sgid_tbl->tbl);
574 sgid_tbl->tbl
[all...]
/linux-master/tools/perf/arch/mips/
Makefile 10 sysdef := $(sysprf)/syscall_n64.tbl
/linux-master/tools/perf/arch/s390/
Makefile 16 sysdef := $(sysprf)/syscall.tbl
/linux-master/drivers/clk/tegra/
clk.c 269 void tegra_init_from_table(struct tegra_clk_init_table *tbl, argument
274 for (; tbl->clk_id < clk_max; tbl++) {
275 clk = clks[tbl->clk_id];
278 __func__, PTR_ERR(clk), tbl->clk_id);
284 if (tbl->parent_id < clk_max) {
285 struct clk *parent = clks[tbl->parent_id];
294 if (tbl->rate)
295 if (clk_set_rate(clk, tbl->rate)) {
297 __func__, tbl
[all...]
/linux-master/drivers/pci/
devres.c 307 void __iomem **tbl; local
311 tbl = (void __iomem **)pcim_iomap_table(pdev);
312 if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
315 tbl[bar] = pci_iomap(pdev, bar, maxlen);
316 return tbl[bar];
329 void __iomem **tbl; local
334 tbl = (void __iomem **)pcim_iomap_table(pdev);
335 BUG_ON(!tbl);
338 if (tbl[
[all...]
/linux-master/scripts/dtc/
livetree.c 694 struct reserve_info *ri, **tbl; local
705 tbl = xmalloc(n * sizeof(*tbl));
710 tbl[i++] = ri;
712 qsort(tbl, n, sizeof(*tbl), cmp_reserve_info);
714 dti->reservelist = tbl[0];
716 tbl[i]->next = tbl[i+1];
717 tbl[
735 struct property *prop, **tbl; local
771 struct node *subnode, **tbl; local
[all...]
/linux-master/arch/sparc/include/asm/
iommu_64.h 49 struct iommu_map_table tbl; member in struct:atu
56 struct iommu_map_table tbl; member in struct:iommu
/linux-master/scripts/atomic/
gen-atomics.sh 7 ATOMICTBL=${ATOMICDIR}/atomics.tbl
/linux-master/include/linux/
rhashtable-types.h 71 * @tbl: Bucket table
82 struct bucket_table __rcu *tbl; member in struct:rhashtable
104 * @tbl: The table that we were walking over
108 struct bucket_table *tbl; member in struct:rhashtable_walker
/linux-master/tools/perf/arch/powerpc/util/
kvm-stat.c 46 struct exit_reasons_table *tbl = hcall_reasons; local
48 while (tbl->reason != NULL) {
49 if (tbl->exit_code == exit_code)
50 return tbl->reason;
51 tbl++;
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_matcher.c 394 mlx5dr_dbg(matcher->tbl->dmn,
408 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
802 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
823 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
841 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
900 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
944 struct mlx5dr_table *tbl = matcher->tbl; local
945 struct mlx5dr_domain *dmn = tbl->dmn;
954 matcher->rx.nic_tbl = &tbl
989 mlx5dr_matcher_create(struct mlx5dr_table *tbl, u32 priority, u8 match_criteria_enable, struct mlx5dr_match_parameters *mask) argument
1093 struct mlx5dr_table *tbl = matcher->tbl; local
[all...]
/linux-master/drivers/net/wireless/mediatek/mt76/
mt792x_acpi_sar.c 8 mt792x_acpi_read(struct mt792x_dev *dev, u8 *method, u8 **tbl, u32 *len) argument
40 if (!*tbl) {
41 *tbl = devm_kzalloc(mdev->dev, sar_root->package.count,
43 if (!*tbl) {
58 *(*tbl + i) = (u8)sar_unit->integer.value;
227 band_pwr = &asar->geo->tbl[idx].band[0];
228 max = ARRAY_SIZE(asar->geo->tbl[idx].band);
230 band_pwr = &asar->geo_v2->tbl[idx].band[0];
231 max = ARRAY_SIZE(asar->geo_v2->tbl[idx].band);
270 limit = &asar->dyn->tbl[
[all...]
/linux-master/arch/sparc/kernel/
iommu.c 52 struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
105 iommu->tbl.table_map_base = dma_offset;
111 iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
112 if (!iommu->tbl.map)
115 iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
150 kfree(iommu->tbl.map);
151 iommu->tbl.map = NULL;
162 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
230 *dma_addrp = (iommu->tbl.table_map_base +
256 iommu_tbl_range_free(&iommu->tbl, dvm
594 struct iommu_map_table *tbl = &iommu->tbl; local
682 struct iommu_map_table *tbl = &iommu->tbl; local
718 struct iommu_map_table *tbl = &iommu->tbl; local
[all...]
iommu-common.c 223 static struct iommu_pool *get_pool(struct iommu_map_table *tbl, argument
227 unsigned long largepool_start = tbl->large_pool.start;
228 bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);
232 p = &tbl->large_pool;
234 unsigned int pool_nr = entry / tbl->poolsize;
236 BUG_ON(pool_nr >= tbl->nr_pools);
237 p = &tbl->pools[pool_nr];
/linux-master/drivers/net/wireless/realtek/rtw88/
phy.h 33 void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
34 void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
35 void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
36 void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
38 void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
40 void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
42 void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
/linux-master/kernel/
ucount.c 97 struct ctl_table *tbl; local
101 tbl = kmemdup(user_table, sizeof(user_table), GFP_KERNEL);
102 if (tbl) {
105 tbl[i].data = &ns->ucount_max[i];
107 ns->sysctls = __register_sysctl_table(&ns->set, "user", tbl,
111 kfree(tbl);
122 struct ctl_table *tbl; local
124 tbl = ns->sysctls->ctl_table_arg;
127 kfree(tbl);
/linux-master/fs/nfs/
callback_proc.c 406 validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot, argument
412 if (args->csa_slotid > tbl->server_highest_slotid)
418 if (nfs4_test_locked_slot(tbl, slot->slot_nr))
460 struct nfs4_slot_table *tbl; local
469 tbl = &session->fc_slot_table;
481 status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
499 struct nfs4_slot_table *tbl; local
514 tbl = &clp->cl_session->bc_slot_table;
522 spin_lock(&tbl->slot_tbl_lock);
524 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl
[all...]
/linux-master/net/openvswitch/
flow_table.c 245 static int tbl_mask_array_realloc(struct flow_table *tbl, int size) argument
254 old = ovsl_dereference(tbl->mask_array);
265 rcu_assign_pointer(tbl->mask_array, new);
270 static int tbl_mask_array_add_mask(struct flow_table *tbl, argument
273 struct mask_array *ma = ovsl_dereference(tbl->mask_array);
277 err = tbl_mask_array_realloc(tbl, ma->max +
282 ma = ovsl_dereference(tbl->mask_array);
298 static void tbl_mask_array_del_mask(struct flow_table *tbl, argument
301 struct mask_array *ma = ovsl_dereference(tbl->mask_array);
324 tbl_mask_array_realloc(tbl, m
331 flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask) argument
727 flow_lookup(struct flow_table *tbl, struct table_instance *ti, struct mask_array *ma, const struct sw_flow_key *key, u32 *n_mask_hit, u32 *n_cache_hit, u32 *index) argument
783 ovs_flow_tbl_lookup_stats(struct flow_table *tbl, const struct sw_flow_key *key, u32 skb_hash, u32 *n_mask_hit, u32 *n_cache_hit) argument
847 ovs_flow_tbl_lookup(struct flow_table *tbl, const struct sw_flow_key *key) argument
867 ovs_flow_tbl_lookup_exact(struct flow_table *tbl, const struct sw_flow_match *match) argument
917 ovs_flow_tbl_lookup_ufid(struct flow_table *tbl, const struct sw_flow_id *ufid) argument
987 flow_mask_find(const struct flow_table *tbl, const struct sw_flow_mask *mask) argument
1006 flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow, const struct sw_flow_mask *new) argument
[all...]
/linux-master/arch/arm64/kvm/hyp/include/nvhe/
pkvm.h 56 void pkvm_hyp_vm_table_init(void *tbl);

