Searched refs:tbl (Results 1 - 25 of 313) sorted by relevance

/linux-master/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_doorbell.c
56 struct pvrdma_id_table *tbl = &dev->uar_table.tbl; local
61 tbl->last = 0;
62 tbl->top = 0;
63 tbl->max = num;
64 tbl->mask = mask;
65 spin_lock_init(&tbl->lock);
66 tbl->table = bitmap_zalloc(num, GFP_KERNEL);
67 if (!tbl->table)
71 __set_bit(0, tbl
78 struct pvrdma_id_table *tbl = &dev->uar_table.tbl; local
85 struct pvrdma_id_table *tbl; local
117 struct pvrdma_id_table *tbl = &dev->uar_table.tbl; local
[all...]
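The pvrdma_doorbell.c matches above show the usual kernel pattern for a bitmap-backed ID table: a spinlock, a size, a bitmap allocated with bitmap_zalloc(), and ID 0 reserved up front. The sketch below is a generic, hypothetical version of that pattern; the struct and function names are illustrative, not the driver's.

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative bitmap-backed ID table, not the pvrdma structures. */
struct id_tbl_example {
	spinlock_t lock;
	u32 max;
	unsigned long *map;
};

static int id_tbl_example_init(struct id_tbl_example *tbl, u32 num)
{
	spin_lock_init(&tbl->lock);
	tbl->max = num;
	tbl->map = bitmap_zalloc(num, GFP_KERNEL);
	if (!tbl->map)
		return -ENOMEM;
	__set_bit(0, tbl->map);	/* reserve ID 0, as the matches above suggest the driver does */
	return 0;
}

static int id_tbl_example_alloc(struct id_tbl_example *tbl)
{
	unsigned long flags;
	u32 id;

	spin_lock_irqsave(&tbl->lock, flags);
	id = find_first_zero_bit(tbl->map, tbl->max);
	if (id < tbl->max)
		__set_bit(id, tbl->map);
	spin_unlock_irqrestore(&tbl->lock, flags);

	return id < tbl->max ? (int)id : -ENOSPC;
}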
/linux-master/tools/perf/util/
syscalltbl.h
15 void syscalltbl__delete(struct syscalltbl *tbl);
17 const char *syscalltbl__name(const struct syscalltbl *tbl, int id);
18 int syscalltbl__id(struct syscalltbl *tbl, const char *name);
20 int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_glob, int *idx);
21 int syscalltbl__strglobmatch_next(struct syscalltbl *tbl, const char *syscall_glob, int *idx);
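Taken together, these declarations describe a small lookup API: resolve a syscall name to an id and back, plus glob matching over names. A minimal, hypothetical caller inside perf might look like the sketch below; syscalltbl__new() is assumed from the constructor-style allocation visible in syscalltbl.c further down, and error handling is abbreviated.

/* Hypothetical usage sketch; include path is relative to tools/perf. */
#include <stdio.h>
#include "util/syscalltbl.h"

static void dump_openat(void)
{
	struct syscalltbl *tbl = syscalltbl__new();	/* assumed constructor */
	int id;

	if (tbl == NULL)
		return;

	id = syscalltbl__id(tbl, "openat");		/* name -> id */
	if (id >= 0)
		printf("openat is syscall %d (%s)\n", id,
		       syscalltbl__name(tbl, id));	/* id -> name */

	syscalltbl__delete(tbl);
}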
syscalltbl.c
67 static int syscalltbl__init_native(struct syscalltbl *tbl) argument
76 entries = tbl->syscalls.entries = malloc(sizeof(struct syscall) * nr_entries);
77 if (tbl->syscalls.entries == NULL)
88 qsort(tbl->syscalls.entries, nr_entries, sizeof(struct syscall), syscallcmp);
89 tbl->syscalls.nr_entries = nr_entries;
90 tbl->syscalls.max_id = syscalltbl_native_max_id;
96 struct syscalltbl *tbl = malloc(sizeof(*tbl)); local
97 if (tbl) {
98 if (syscalltbl__init_native(tbl)) {
106 syscalltbl__delete(struct syscalltbl *tbl) argument
117 syscalltbl__id(struct syscalltbl *tbl, const char *name) argument
126 syscalltbl__strglobmatch_next(struct syscalltbl *tbl, const char *syscall_glob, int *idx) argument
141 syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_glob, int *idx) argument
153 struct syscalltbl *tbl = zalloc(sizeof(*tbl)); local
159 syscalltbl__delete(struct syscalltbl *tbl) argument
164 syscalltbl__name(const struct syscalltbl *tbl, int id) argument
169 syscalltbl__id(struct syscalltbl *tbl, const char *name) argument
180 syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_glob, int *idx) argument
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_table.c
28 action->dest_tbl->tbl->rx.s_anchor->chunk :
29 action->dest_tbl->tbl->tx.s_anchor->chunk;
46 int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl, argument
54 mlx5dr_domain_lock(tbl->dmn);
56 if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX ||
57 tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
58 ret = dr_table_set_miss_action_nic(tbl->dmn, &tbl->rx, action);
63 if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX ||
64 tbl
92 dr_table_uninit_fdb(struct mlx5dr_table *tbl) argument
98 dr_table_uninit(struct mlx5dr_table *tbl) argument
159 dr_table_init_fdb(struct mlx5dr_table *tbl) argument
178 dr_table_init(struct mlx5dr_table *tbl) argument
213 dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl) argument
220 dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl, u16 uid) argument
253 struct mlx5dr_table *tbl; local
288 mlx5dr_table_destroy(struct mlx5dr_table *tbl) argument
311 mlx5dr_table_get_id(struct mlx5dr_table *tbl) argument
[all...]
/linux-master/fs/nfs/
nfs4session.c
27 static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue) argument
29 tbl->highest_used_slotid = NFS4_NO_SLOT;
30 spin_lock_init(&tbl->slot_tbl_lock);
31 rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue);
32 init_waitqueue_head(&tbl->slot_waitq);
33 init_completion(&tbl->complete);
39 static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize) argument
42 if (newsize >= tbl->max_slots)
45 p = &tbl->slots;
53 tbl
62 nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl) argument
83 nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot) argument
104 nfs4_new_slot(struct nfs4_slot_table *tbl, u32 slotid, u32 seq_init, gfp_t gfp_mask) argument
120 nfs4_find_or_create_slot(struct nfs4_slot_table *tbl, u32 slotid, u32 seq_init, gfp_t gfp_mask) argument
142 nfs4_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot) argument
159 nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot) argument
172 nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid) argument
201 nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl, u32 slotid, u32 seq_nr) argument
222 nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl, u32 slotid, u32 seq_nr, unsigned long timeout) argument
242 nfs4_alloc_slot(struct nfs4_slot_table *tbl) argument
262 nfs4_grow_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, u32 ivalue) argument
272 nfs4_reset_slot_table(struct nfs4_slot_table *tbl, u32 server_highest_slotid, u32 ivalue) argument
297 nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs, u32 ivalue) argument
326 nfs4_release_slot_table(struct nfs4_slot_table *tbl) argument
336 nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl) argument
350 nfs4_setup_slot_table(struct nfs4_slot_table *tbl, unsigned int max_reqs, const char *queue) argument
362 struct nfs4_slot_table *tbl = slot->table; local
375 __nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot) argument
383 nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot) argument
391 nfs41_try_wake_next_slot_table_entry(struct nfs4_slot_table *tbl) argument
403 nfs41_wake_slot_table(struct nfs4_slot_table *tbl) argument
413 nfs41_set_max_slotid_locked(struct nfs4_slot_table *tbl, u32 target_highest_slotid) argument
428 nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl, u32 target_highest_slotid) argument
437 nfs41_set_target_slotid(struct nfs4_slot_table *tbl, u32 target_highest_slotid) argument
448 nfs41_set_server_slotid_locked(struct nfs4_slot_table *tbl, u32 highest_slotid) argument
489 nfs41_is_outlier_target_slotid(struct nfs4_slot_table *tbl, u32 new_target) argument
510 nfs41_update_target_slotid(struct nfs4_slot_table *tbl, struct nfs4_slot *slot, struct nfs4_sequence_res *res) argument
538 struct nfs4_slot_table *tbl; local
[all...]
nfs4session.h
83 extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
85 extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
86 extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
87 extern struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid);
88 extern int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
91 extern bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
92 extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
93 extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
94 bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
96 void nfs41_wake_slot_table(struct nfs4_slot_table *tbl);
98 nfs4_slot_tbl_draining(struct nfs4_slot_table *tbl) argument
103 nfs4_test_locked_slot(const struct nfs4_slot_table *tbl, u32 slotid) argument
[all...]
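The header above exposes the slot-table life cycle used by the NFSv4.1 session code: set up a table, allocate and lock slots, free them again, and eventually shut the table down. A rough, hypothetical call sequence is sketched below; locking, draining and wake-up details are deliberately omitted (real callers take tbl->slot_tbl_lock and follow the paths shown in nfs4session.c above).

/* Rough sketch only, built from the declarations listed above. */
static int slot_table_example(struct nfs4_slot_table *tbl)
{
	struct nfs4_slot *slot;
	int ret;

	ret = nfs4_setup_slot_table(tbl, 16, "example slot table");
	if (ret)
		return ret;

	slot = nfs4_alloc_slot(tbl);		/* lowest free slotid, or ERR_PTR */
	if (!IS_ERR(slot)) {
		/* ... issue a SEQUENCE-bearing request using this slot ... */
		nfs4_free_slot(tbl, slot);
	}

	nfs4_shutdown_slot_table(tbl);
	return 0;
}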
/linux-master/net/netfilter/
xt_repldata.h
9 /* tbl has the following structure equivalent, but is C99 compliant:
14 * } *tbl;
24 } *tbl; \
26 size_t term_offset = (offsetof(typeof(*tbl), entries[nhooks]) + \
28 tbl = kzalloc(term_offset + sizeof(*term), GFP_KERNEL); \
29 if (tbl == NULL) \
31 term = (struct type##_error *)&(((char *)tbl)[term_offset]); \
32 strscpy_pad(tbl->repl.name, info->name, sizeof(tbl->repl.name)); \
34 tbl
[all...]
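The macro in xt_repldata.h builds, in one kzalloc'd block, what the quoted comment describes: a replace header, one standard entry per hook, and a terminating error rule placed after the entries. The illustration below instantiates the equivalent layout for the ipt_* case with a made-up hook count; the rounding of term_offset to the terminator's alignment is assumed from the macro's continuation, which the listing truncates.

/* Illustration only: the C99-compliant equivalent the comment refers to. */
#include <stddef.h>
#include <linux/netfilter_ipv4/ip_tables.h>

#define NHOOKS 5	/* hypothetical hook count */

struct ipt_repl_layout {
	struct ipt_replace repl;		/* table header        */
	struct ipt_standard entries[NHOOKS];	/* one rule per hook   */
	struct ipt_error term;			/* terminating rule    */
};

/* term_offset in the spirit of the macro: end of entries[], rounded up
 * to the alignment of the terminating ipt_error. */
static const size_t term_offset_example =
	(offsetof(struct ipt_repl_layout, entries[NHOOKS]) +
	 __alignof__(struct ipt_error) - 1) &
	~(__alignof__(struct ipt_error) - 1);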
/linux-master/arch/powerpc/kernel/
iommu.c
45 struct iommu_table *tbl = data; local
46 *val = bitmap_weight(tbl->it_map, tbl->it_size);
51 static void iommu_debugfs_add(struct iommu_table *tbl) argument
56 sprintf(name, "%08lx", tbl->it_index);
59 debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
60 debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
61 debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
62 debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
63 debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl
68 iommu_debugfs_del(struct iommu_table *tbl) argument
76 iommu_debugfs_add(struct iommu_table *tbl) argument
77 iommu_debugfs_del(struct iommu_table *tbl) argument
213 iommu_range_alloc(struct device *dev, struct iommu_table *tbl, unsigned long npages, unsigned long *handle, unsigned long mask, unsigned int align_order) argument
345 iommu_alloc(struct device *dev, struct iommu_table *tbl, void *page, unsigned int npages, enum dma_data_direction direction, unsigned long mask, unsigned int align_order, unsigned long attrs) argument
388 iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned int npages) argument
416 get_pool(struct iommu_table *tbl, unsigned long entry) argument
435 __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned int npages) argument
457 iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned int npages) argument
470 ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, struct scatterlist *sglist, int nelems, unsigned long mask, enum dma_data_direction direction, unsigned long attrs) argument
613 ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, unsigned long attrs) argument
645 iommu_table_clear(struct iommu_table *tbl) argument
686 iommu_table_reserve_pages(struct iommu_table *tbl, unsigned long res_start, unsigned long res_end) argument
724 iommu_init_table(struct iommu_table *tbl, int nid, unsigned long res_start, unsigned long res_end) argument
781 iommu_table_in_use(struct iommu_table *tbl) argument
804 struct iommu_table *tbl; local
829 iommu_tce_table_get(struct iommu_table *tbl) argument
838 iommu_tce_table_put(struct iommu_table *tbl) argument
852 iommu_map_page(struct device *dev, struct iommu_table *tbl, struct page *page, unsigned long offset, size_t size, unsigned long mask, enum dma_data_direction direction, unsigned long attrs) argument
891 iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction, unsigned long attrs) argument
910 iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, size_t size, dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node) argument
960 iommu_free_coherent(struct iommu_table *tbl, size_t size, void *vaddr, dma_addr_t dma_handle) argument
1035 iommu_flush_tce(struct iommu_table *tbl) argument
1077 iommu_tce_xchg_no_kill(struct mm_struct *mm, struct iommu_table *tbl, unsigned long entry, unsigned long *hpa, enum dma_data_direction *direction) argument
1096 iommu_tce_kill(struct iommu_table *tbl, unsigned long entry, unsigned long pages) argument
1105 iommu_take_ownership(struct iommu_table *tbl) argument
1138 iommu_release_ownership(struct iommu_table *tbl) argument
1209 struct iommu_table *tbl = table_group->tables[0]; local
1223 spapr_tce_set_window(struct iommu_table_group *table_group, int num, struct iommu_table *tbl) argument
1239 struct iommu_table *tbl = table_group->tables[i]; local
1260 struct iommu_table *tbl = table_group->tables[i]; local
[all...]
/linux-master/drivers/firmware/efi/
memattr.c
24 efi_memory_attributes_table_t *tbl; local
29 tbl = early_memremap(efi_mem_attr_table, sizeof(*tbl));
30 if (!tbl) {
36 if (tbl->version > 2) {
38 tbl->version);
42 tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
47 early_memunmap(tbl, sizeo
131 efi_memory_attributes_table_t *tbl; local
[all...]
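The memattr.c matches illustrate the common early-boot idiom for variable-sized firmware tables: map only the fixed header with early_memremap(), read num_entries and desc_size to learn the full size, then unmap. A hedged sketch of that idiom, following the names in the snippet and with error paths shortened:

/* Sketch of the map-header-then-size idiom shown above; efi_mem_attr_table
 * is the physical address referenced in the snippet. */
static unsigned long __init memattr_table_size(void)
{
	efi_memory_attributes_table_t *tbl;
	unsigned long tbl_size;

	tbl = early_memremap(efi_mem_attr_table, sizeof(*tbl));
	if (!tbl)
		return 0;

	/* Total size = fixed header + one descriptor per entry. */
	tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
	early_memunmap(tbl, sizeof(*tbl));

	return tbl_size;
}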
/linux-master/arch/powerpc/include/asm/
iommu.h
43 int (*set)(struct iommu_table *tbl,
54 int (*xchg_no_kill)(struct iommu_table *tbl,
59 void (*tce_kill)(struct iommu_table *tbl,
63 __be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
65 void (*clear)(struct iommu_table *tbl,
68 unsigned long (*get)(struct iommu_table *tbl, long index);
69 void (*flush)(struct iommu_table *tbl);
70 void (*free)(struct iommu_table *tbl);
120 #define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
121 ((tbl)
127 get_iommu_order(unsigned long size, struct iommu_table *tbl) argument
[all...]
/linux-master/arch/powerpc/platforms/powernv/
pci-ioda-tce.c
48 void pnv_pci_setup_iommu_table(struct iommu_table *tbl, argument
52 tbl->it_blocksize = 16;
53 tbl->it_base = (unsigned long)tce_mem;
54 tbl->it_page_shift = page_shift;
55 tbl->it_offset = dma_offset >> tbl->it_page_shift;
56 tbl->it_index = 0;
57 tbl->it_size = tce_size >> 3;
58 tbl->it_busno = 0;
59 tbl
83 pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc) argument
124 pnv_tce_build(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, unsigned long attrs) argument
147 pnv_tce_xchg(struct iommu_table *tbl, long index, unsigned long *hpa, enum dma_data_direction *direction) argument
181 pnv_tce_useraddrptr(struct iommu_table *tbl, long index, bool alloc) argument
190 pnv_tce_free(struct iommu_table *tbl, long index, long npages) argument
206 pnv_tce_get(struct iommu_table *tbl, long index) argument
240 pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl) argument
290 pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, __u32 page_shift, __u64 window_size, __u32 levels, bool alloc_userspace_copy, struct iommu_table *tbl) argument
370 pnv_pci_unlink_table_and_group(struct iommu_table *tbl, struct iommu_table_group *table_group) argument
410 pnv_pci_link_table_and_group(int node, int num, struct iommu_table *tbl, struct iommu_table_group *table_group) argument
[all...]
/linux-master/lib/
rhashtable.c
38 const struct bucket_table *tbl,
41 return rht_head_hashfn(ht, tbl, he, ht->p);
53 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) argument
57 if (unlikely(tbl->nest))
59 return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
67 const struct bucket_table *tbl)
70 * because it's set at the same time as tbl->nest.
72 return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
94 static void nested_bucket_table_free(const struct bucket_table *tbl) argument
96 unsigned int size = tbl
37 head_hashfn(struct rhashtable *ht, const struct bucket_table *tbl, const struct rhash_head *he) argument
66 nested_table_top( const struct bucket_table *tbl) argument
109 bucket_table_free(const struct bucket_table *tbl) argument
152 struct bucket_table *tbl; local
179 struct bucket_table *tbl = NULL; local
211 rhashtable_last_table(struct rhashtable *ht, struct bucket_table *tbl) argument
413 struct bucket_table *tbl; local
442 rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl) argument
490 rhashtable_lookup_one(struct rhashtable *ht, struct rhash_lock_head __rcu **bkt, struct bucket_table *tbl, unsigned int hash, const void *key, struct rhash_head *obj) argument
541 rhashtable_insert_one( struct rhashtable *ht, struct rhash_lock_head __rcu **bkt, struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj, void *data) argument
594 struct bucket_table *tbl; local
793 struct bucket_table *tbl = iter->walker.tbl; local
938 struct bucket_table *tbl = iter->walker.tbl; local
1022 struct bucket_table *tbl; local
1138 struct bucket_table *tbl, *next_tbl; local
1178 __rht_bucket_nested( const struct bucket_table *tbl, unsigned int hash) argument
1207 rht_bucket_nested( const struct bucket_table *tbl, unsigned int hash) argument
1218 rht_bucket_nested_insert( struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) argument
[all...]
/linux-master/include/linux/
rhashtable.h
119 static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, argument
122 return hash & (tbl->size - 1);
156 struct rhashtable *ht, const struct bucket_table *tbl,
159 unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);
161 return rht_bucket_index(tbl, hash);
165 struct rhashtable *ht, const struct bucket_table *tbl,
171 rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
173 tbl->hash_rnd)) :
174 rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
180 * @tbl
155 rht_key_hashfn( struct rhashtable *ht, const struct bucket_table *tbl, const void *key, const struct rhashtable_params params) argument
164 rht_head_hashfn( struct rhashtable *ht, const struct bucket_table *tbl, const struct rhash_head *he, const struct rhashtable_params params) argument
182 rht_grow_above_75(const struct rhashtable *ht, const struct bucket_table *tbl) argument
195 rht_shrink_below_30(const struct rhashtable *ht, const struct bucket_table *tbl) argument
208 rht_grow_above_100(const struct rhashtable *ht, const struct bucket_table *tbl) argument
220 rht_grow_above_max(const struct rhashtable *ht, const struct bucket_table *tbl) argument
235 lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) argument
286 rht_bucket( const struct bucket_table *tbl, unsigned int hash) argument
293 rht_bucket_var( struct bucket_table *tbl, unsigned int hash) argument
300 rht_bucket_insert( struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) argument
326 rht_lock(struct bucket_table *tbl, struct rhash_lock_head __rcu **bkt) argument
337 rht_lock_nested(struct bucket_table *tbl, struct rhash_lock_head __rcu **bucket, unsigned int subclass) argument
349 rht_unlock(struct bucket_table *tbl, struct rhash_lock_head __rcu **bkt, unsigned long flags) argument
379 rht_ptr( struct rhash_lock_head __rcu *const *bkt, struct bucket_table *tbl, unsigned int hash) argument
401 rht_assign_unlock(struct bucket_table *tbl, struct rhash_lock_head __rcu **bkt, struct rhash_head *obj, unsigned long flags) argument
598 struct bucket_table *tbl; local
715 struct bucket_table *tbl; local
995 __rhashtable_remove_fast_one( struct rhashtable *ht, struct bucket_table *tbl, struct rhash_head *obj, const struct rhashtable_params params, bool rhlist) argument
1081 struct bucket_table *tbl; local
1148 __rhashtable_replace_fast( struct rhashtable *ht, struct bucket_table *tbl, struct rhash_head *obj_old, struct rhash_head *obj_new, const struct rhashtable_params params) argument
1216 struct bucket_table *tbl; local
[all...]
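The helpers above (bucket indexing, hash functions, grow/shrink thresholds, per-bucket locking) are internals behind the public rhashtable API. For orientation, a minimal kernel-side usage sketch of that public API; the object type and key here are invented for illustration.

#include <linux/rhashtable.h>
#include <linux/slab.h>

/* Illustrative object; only the key and the rhash_head linkage matter. */
struct demo_obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params demo_params = {
	.key_len     = sizeof(u32),
	.key_offset  = offsetof(struct demo_obj, key),
	.head_offset = offsetof(struct demo_obj, node),
	.automatic_shrinking = true,
};

static int demo_rhashtable(void)
{
	struct rhashtable ht;
	struct demo_obj *obj, *found;
	u32 key = 42;
	int err;

	err = rhashtable_init(&ht, &demo_params);
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		err = -ENOMEM;
		goto out_destroy;
	}
	obj->key = key;

	err = rhashtable_insert_fast(&ht, &obj->node, demo_params);
	if (err) {
		kfree(obj);
		goto out_destroy;
	}

	rcu_read_lock();
	found = rhashtable_lookup_fast(&ht, &key, demo_params);	/* obj or NULL */
	rcu_read_unlock();

	rhashtable_remove_fast(&ht, &obj->node, demo_params);
	kfree(obj);
out_destroy:
	rhashtable_destroy(&ht);
	return err;
}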
/linux-master/net/netfilter/ipvs/
ip_vs_lblc.c
9 * *lock(tbl->lock) ==> *lock(&tbl->lock)
10 * Wensong Zhang : fixed the uninitialized tbl->lock bug
168 ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) argument
172 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
173 atomic_inc(&tbl->entries);
179 ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl, argument
185 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
198 ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr, argument
203 en = ip_vs_lblc_get(af, tbl, dadd
231 struct ip_vs_lblc_table *tbl = svc->sched_data; local
258 struct ip_vs_lblc_table *tbl = svc->sched_data; local
296 struct ip_vs_lblc_table *tbl = from_timer(tbl, t, periodic_timer); local
346 struct ip_vs_lblc_table *tbl; local
384 struct ip_vs_lblc_table *tbl = svc->sched_data; local
484 struct ip_vs_lblc_table *tbl = svc->sched_data; local
[all...]
ip_vs_lblcr.c
331 ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en) argument
335 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
336 atomic_inc(&tbl->entries);
342 ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl, argument
348 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
361 ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr, argument
366 en = ip_vs_lblcr_get(af, tbl, daddr);
382 ip_vs_lblcr_hash(tbl, en);
397 struct ip_vs_lblcr_table *tbl = svc->sched_data; local
403 tbl
423 struct ip_vs_lblcr_table *tbl = svc->sched_data; local
460 struct ip_vs_lblcr_table *tbl = from_timer(tbl, t, periodic_timer); local
509 struct ip_vs_lblcr_table *tbl; local
547 struct ip_vs_lblcr_table *tbl = svc->sched_data; local
648 struct ip_vs_lblcr_table *tbl = svc->sched_data; local
[all...]
/linux-master/ipc/
mq_sysctl.c
121 struct ctl_table *tbl; local
125 tbl = kmemdup(mq_sysctls, sizeof(mq_sysctls), GFP_KERNEL);
126 if (tbl) {
130 if (tbl[i].data == &init_ipc_ns.mq_queues_max)
131 tbl[i].data = &ns->mq_queues_max;
133 else if (tbl[i].data == &init_ipc_ns.mq_msg_max)
134 tbl[i].data = &ns->mq_msg_max;
136 else if (tbl[i].data == &init_ipc_ns.mq_msgsize_max)
137 tbl[i].data = &ns->mq_msgsize_max;
139 else if (tbl[
163 struct ctl_table *tbl; local
[all...]
ipc_sysctl.c
249 struct ctl_table *tbl; local
253 tbl = kmemdup(ipc_sysctls, sizeof(ipc_sysctls), GFP_KERNEL);
254 if (tbl) {
258 if (tbl[i].data == &init_ipc_ns.shm_ctlmax)
259 tbl[i].data = &ns->shm_ctlmax;
261 else if (tbl[i].data == &init_ipc_ns.shm_ctlall)
262 tbl[i].data = &ns->shm_ctlall;
264 else if (tbl[i].data == &init_ipc_ns.shm_ctlmni)
265 tbl[i].data = &ns->shm_ctlmni;
267 else if (tbl[
309 struct ctl_table *tbl; local
[all...]
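Both mq_sysctl.c and ipc_sysctl.c above use the same pattern for namespaced sysctls: kmemdup() the template table, then rewrite every .data pointer that referenced init_ipc_ns so it points at the new namespace's field. Generically, and with invented names (my_ns, my_sysctls, limit_a/limit_b are hypothetical), the pattern looks like this:

/* Generic illustration of the kmemdup-and-repoint pattern; not the ipc code. */
static struct ctl_table *dup_sysctls_for_ns(struct my_ns *ns)
{
	struct ctl_table *tbl;
	int i;

	tbl = kmemdup(my_sysctls, sizeof(my_sysctls), GFP_KERNEL);
	if (!tbl)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(my_sysctls); i++) {
		/* Entries still pointing at the init namespace get redirected. */
		if (tbl[i].data == &init_my_ns.limit_a)
			tbl[i].data = &ns->limit_a;
		else if (tbl[i].data == &init_my_ns.limit_b)
			tbl[i].data = &ns->limit_b;
	}

	return tbl;	/* registered with the sysctl core later, freed on namespace exit */
}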
/linux-master/drivers/gpu/drm/i915/
i915_user_extensions.h
16 const i915_user_extension_fn *tbl,
/linux-master/drivers/phy/qualcomm/
phy-qcom-qmp-common.h
34 const struct qmp_phy_init_tbl tbl[],
39 const struct qmp_phy_init_tbl *t = tbl;
53 const struct qmp_phy_init_tbl tbl[],
56 qmp_configure_lane(base, tbl, num, 0xff);
33 qmp_configure_lane(void __iomem *base, const struct qmp_phy_init_tbl tbl[], int num, u8 lane_mask) argument
52 qmp_configure(void __iomem *base, const struct qmp_phy_init_tbl tbl[], int num) argument
/linux-master/net/core/
neighbour.c
57 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
65 Neighbour hash table buckets are protected with rwlock tbl->lock.
123 atomic_dec(&n->tbl->gc_entries);
133 write_lock_bh(&n->tbl->lock);
147 atomic_dec(&n->tbl->gc_entries);
150 list_add_tail(&n->gc_list, &n->tbl->gc_list);
151 atomic_inc(&n->tbl->gc_entries);
155 write_unlock_bh(&n->tbl->lock);
162 write_lock_bh(&n->tbl->lock);
173 list_add_tail(&n->managed_list, &n->tbl
208 neigh_del(struct neighbour *n, struct neighbour __rcu **np, struct neigh_table *tbl) argument
229 neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl) argument
252 neigh_forced_gc(struct neigh_table *tbl) argument
380 neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev, bool skip_perm) argument
434 neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev) argument
442 __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev, bool skip_perm) argument
455 neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev) argument
462 neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) argument
469 neigh_alloc(struct neigh_table *tbl, struct net_device *dev, u32 flags, bool exempt_from_gc) argument
577 neigh_hash_grow(struct neigh_table *tbl, unsigned long new_shift) argument
618 neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev) argument
639 ___neigh_create(struct neigh_table *tbl, const void *pkey, struct net_device *dev, u32 flags, bool exempt_from_gc, bool want_ref) argument
734 __neigh_create(struct neigh_table *tbl, const void *pkey, struct net_device *dev, bool want_ref) argument
767 __pneigh_lookup(struct neigh_table *tbl, struct net *net, const void *pkey, struct net_device *dev) argument
778 pneigh_lookup(struct neigh_table *tbl, struct net *net, const void *pkey, struct net_device *dev, int creat) argument
822 pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey, struct net_device *dev) argument
847 pneigh_ifdown_and_unlock(struct neigh_table *tbl, struct net_device *dev) argument
948 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work); local
1510 neigh_event_ns(struct neigh_table *tbl, u8 *lladdr, void *saddr, struct net_device *dev) argument
1609 struct neigh_table *tbl = container_of(work, struct neigh_table, local
1623 struct neigh_table *tbl = from_timer(tbl, t, proxy_timer); local
1668 pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p, struct sk_buff *skb) argument
1695 lookup_neigh_parms(struct neigh_table *tbl, struct net *net, int ifindex) argument
1709 neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl) argument
1752 neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms) argument
1774 neigh_table_init(int index, struct neigh_table *tbl) argument
1833 neigh_table_clear(int index, struct neigh_table *tbl) argument
1863 struct neigh_table *tbl = NULL; local
1900 struct neigh_table *tbl; local
1967 struct neigh_table *tbl; local
2161 neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, u32 pid, u32 seq, int type, int flags) argument
2251 neightbl_fill_param_info(struct sk_buff *skb, struct neigh_table *tbl, struct neigh_parms *parms, u32 pid, u32 seq, int type, unsigned int flags) argument
2315 struct neigh_table *tbl; local
2508 struct neigh_table *tbl; local
2624 pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, u32 pid, u32 seq, int type, unsigned int flags, struct neigh_table *tbl) argument
2704 neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, struct netlink_callback *cb, struct neigh_dump_filter *filter) argument
2751 pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, struct netlink_callback *cb, struct neigh_dump_filter *filter) argument
2861 struct neigh_table *tbl; local
2903 neigh_valid_get_req(const struct nlmsghdr *nlh, struct neigh_table **tbl, void **dst, int *dev_idx, u8 *ndm_flags, struct netlink_ext_ack *extack) argument
3003 pneigh_get_reply(struct net *net, struct pneigh_entry *neigh, u32 pid, u32 seq, struct neigh_table *tbl) argument
3029 struct neigh_table *tbl = NULL; local
3085 neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie) argument
3108 __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *)) argument
3147 struct neigh_table *tbl; local
3294 struct neigh_table *tbl = state->tbl; local
3317 struct neigh_table *tbl = state->tbl; local
3417 struct neigh_table *tbl = state->tbl; local
3428 struct neigh_table *tbl = pde_data(file_inode(seq->file)); local
3445 struct neigh_table *tbl = pde_data(file_inode(seq->file)); local
3465 struct neigh_table *tbl = pde_data(file_inode(seq->file)); local
3814 struct neigh_table *tbl = p->tbl; local
[all...]
/linux-master/arch/loongarch/kernel/
efi.c
99 struct efi_boot_memmap *tbl; local
129 tbl = early_memremap_ro(boot_memmap, sizeof(*tbl));
130 if (tbl) {
133 data.phys_map = boot_memmap + sizeof(*tbl);
134 data.size = tbl->map_size;
135 data.desc_size = tbl->desc_size;
136 data.desc_version = tbl->desc_ver;
141 early_memunmap(tbl, sizeof(*tbl));
[all...]
/linux-master/drivers/net/wireless/intel/iwlegacy/
4965-rs.c
389 il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx) argument
391 if (tbl->expected_tpt)
392 return tbl->expected_tpt[rs_idx];
404 il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx, argument
415 win = &(tbl->win[scale_idx]);
418 tpt = il4965_get_expected_tpt(tbl, scale_idx);
482 il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl, argument
487 if (is_legacy(tbl->lq_type)) {
492 } else if (is_Ht(tbl->lq_type)) {
499 if (is_siso(tbl
536 il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags, enum nl80211_band band, struct il_scale_tbl_info *tbl, int *rate_idx) argument
597 il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, struct il_scale_tbl_info *tbl) argument
714 il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta, struct il_scale_tbl_info *tbl, u8 scale_idx, u8 ht_possible) argument
1011 il4965_rs_set_expected_tpt_table(struct il_lq_sta *lq_sta, struct il_scale_tbl_info *tbl) argument
1064 il4965_rs_get_best_rate(struct il_priv *il, struct il_lq_sta *lq_sta, struct il_scale_tbl_info *tbl, u16 rate_mask, s8 idx) argument
1158 il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta, struct ieee80211_conf *conf, struct ieee80211_sta *sta, struct il_scale_tbl_info *tbl, int idx) argument
1212 il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta, struct ieee80211_conf *conf, struct ieee80211_sta *sta, struct il_scale_tbl_info *tbl, int idx) argument
1263 struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); local
1383 struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); local
1506 struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); local
1632 struct il_scale_tbl_info *tbl; local
1710 il4965_rs_update_rate_tbl(struct il_priv *il, struct il_lq_sta *lq_sta, struct il_scale_tbl_info *tbl, int idx, u8 is_green) argument
1745 struct il_scale_tbl_info *tbl, *tbl1; local
2135 struct il_scale_tbl_info *tbl; local
2578 struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); local
2727 struct il_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; local
[all...]
/linux-master/drivers/vfio/
vfio_iommu_spapr_tce.c
209 struct iommu_table *tbl = container->tables[i]; local
211 if (tbl) {
212 unsigned long entry = ioba >> tbl->it_page_shift;
213 unsigned long start = tbl->it_offset;
214 unsigned long end = start + tbl->it_size;
217 *ptbl = tbl;
339 struct iommu_table *tbl,
342 struct iommu_table *tbl);
362 struct iommu_table *tbl = container->tables[i]; local
364 if (!tbl)
410 tce_iommu_unuse_page_v2(struct tce_container *container, struct iommu_table *tbl, unsigned long entry) argument
432 tce_iommu_clear(struct tce_container *container, struct iommu_table *tbl, unsigned long entry, unsigned long pages) argument
500 tce_iommu_build(struct tce_container *container, struct iommu_table *tbl, unsigned long entry, unsigned long tce, unsigned long pages, enum dma_data_direction direction) argument
548 tce_iommu_build_v2(struct tce_container *container, struct iommu_table *tbl, unsigned long entry, unsigned long tce, unsigned long pages, enum dma_data_direction direction) argument
635 tce_iommu_free_table(struct tce_container *container, struct iommu_table *tbl) argument
650 struct iommu_table *tbl = NULL; local
713 struct iommu_table *tbl; local
907 struct iommu_table *tbl = NULL; local
974 struct iommu_table *tbl = NULL; local
1212 struct iommu_table *tbl = container->tables[i]; local
[all...]
/linux-master/drivers/net/wireless/marvell/mwifiex/
11n_rxreorder.c
98 struct mwifiex_rx_reorder_tbl *tbl,
108 pkt_to_send = (start_win > tbl->start_win) ?
109 min((start_win - tbl->start_win), tbl->win_size) :
110 tbl->win_size;
113 if (tbl->rx_reorder_ptr[i]) {
114 skb = tbl->rx_reorder_ptr[i];
116 tbl->rx_reorder_ptr[i] = NULL;
124 for (i = 0; i < tbl->win_size - pkt_to_send; ++i) {
125 tbl
97 mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, struct mwifiex_rx_reorder_tbl *tbl, int start_win) argument
145 mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, struct mwifiex_rx_reorder_tbl *tbl) argument
189 mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, struct mwifiex_rx_reorder_tbl *tbl) argument
232 struct mwifiex_rx_reorder_tbl *tbl; local
251 struct mwifiex_rx_reorder_tbl *tbl, *tmp; local
333 struct mwifiex_rx_reorder_tbl *tbl, *new_node; local
410 mwifiex_11n_rxreorder_timer_restart(struct mwifiex_rx_reorder_tbl *tbl) argument
551 struct mwifiex_rx_reorder_tbl *tbl; local
663 struct mwifiex_rx_reorder_tbl *tbl; local
717 struct mwifiex_rx_reorder_tbl *tbl; local
808 struct mwifiex_rx_reorder_tbl *tbl; local
[all...]
/linux-master/drivers/net/wireless/intel/iwlwifi/dvm/
rs.c
414 static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) argument
416 if (tbl->expected_tpt)
417 return tbl->expected_tpt[rs_index];
428 static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, argument
439 window = &(tbl->win[scale_index]);
442 tpt = get_expected_tpt(tbl, scale_index);
507 struct iwl_scale_tbl_info *tbl,
512 if (is_legacy(tbl->lq_type)) {
517 } else if (is_Ht(tbl->lq_type)) {
524 if (is_siso(tbl
506 rate_n_flags_from_tbl(struct iwl_priv *priv, struct iwl_scale_tbl_info *tbl, int index, u8 use_green) argument
562 rs_get_tbl_info_from_mcs(const u32 rate_n_flags, enum nl80211_band band, struct iwl_scale_tbl_info *tbl, int *rate_idx) argument
628 rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, struct iwl_scale_tbl_info *tbl) argument
752 rs_get_lower_rate(struct iwl_lq_sta *lq_sta, struct iwl_scale_tbl_info *tbl, u8 scale_index, u8 ht_possible) argument
825 struct iwl_scale_tbl_info *tbl; local
1064 rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, struct iwl_scale_tbl_info *tbl) argument
1120 rs_get_best_rate(struct iwl_priv *priv, struct iwl_lq_sta *lq_sta, struct iwl_scale_tbl_info *tbl, u16 rate_mask, s8 index) argument
1213 rs_switch_to_mimo2(struct iwl_priv *priv, struct iwl_lq_sta *lq_sta, struct ieee80211_conf *conf, struct ieee80211_sta *sta, struct iwl_scale_tbl_info *tbl, int index) argument
1268 rs_switch_to_mimo3(struct iwl_priv *priv, struct iwl_lq_sta *lq_sta, struct ieee80211_conf *conf, struct ieee80211_sta *sta, struct iwl_scale_tbl_info *tbl, int index) argument
1324 rs_switch_to_siso(struct iwl_priv *priv, struct iwl_lq_sta *lq_sta, struct ieee80211_conf *conf, struct ieee80211_sta *sta, struct iwl_scale_tbl_info *tbl, int index) argument
1379 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); local
1552 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); local
1722 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); local
1890 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); local
2063 struct iwl_scale_tbl_info *tbl; local
2140 rs_update_rate_tbl(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_lq_sta *lq_sta, struct iwl_scale_tbl_info *tbl, int index, u8 is_green) argument
2178 struct iwl_scale_tbl_info *tbl, *tbl1; local
2611 struct iwl_scale_tbl_info *tbl; local
3094 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); local
3222 struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; local
[all...]
