Searched refs:tb2 (Results 1 - 11 of 11) sorted by relevance

/linux-master/net/ipv4/
inet_hashtables.c
102 static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb2, argument
108 write_pnet(&tb2->ib_net, net);
109 tb2->l3mdev = tb->l3mdev;
110 tb2->port = tb->port;
114 tb2->addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
115 tb2->v6_rcv_saddr = sk->sk_v6_rcv_saddr;
117 tb2->addr_type = IPV6_ADDR_MAPPED;
118 ipv6_addr_set_v4mapped(sk->sk_rcv_saddr, &tb2->v6_rcv_saddr);
121 tb2->rcv_saddr = sk->sk_rcv_saddr;
123 INIT_HLIST_HEAD(&tb2
134 struct inet_bind2_bucket *tb2 = kmem_cache_alloc(cachep, GFP_ATOMIC); local
152 inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2, const struct sock *sk) argument
165 inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, struct inet_bind2_bucket *tb2, unsigned short port) argument
196 struct inet_bind2_bucket *tb2 = inet_csk(sk)->icsk_bind2_hash; local
224 struct inet_bind2_bucket *tb2; local
893 struct inet_bind2_bucket *tb2, *new_tb2; local
1003 struct inet_bind2_bucket *tb2; local
[all...]
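
Pieced together, the inet_hashtables.c fragments at lines 102-123 describe how a bind2 bucket is initialized from the socket's binding: the netns, L3 master device and port are copied from the port-keyed inet_bind_bucket, and the socket's receive address is recorded either as a native IPv6 address or as a v4-mapped one. A hedged reassembly follows; the parameters beyond tb2, the branch conditions, the CONFIG_IPV6 guards and the completion of the truncated INIT_HLIST_HEAD() line are assumptions, only the statements shown in the results are verbatim.

    /* Sketch reassembled from the search hits above.  Everything marked
     * "assumed" is not visible in the truncated results. */
    static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb2,
                                       struct net *net,              /* assumed */
                                       struct inet_bind_bucket *tb,  /* assumed */
                                       const struct sock *sk)        /* assumed */
    {
            write_pnet(&tb2->ib_net, net);
            tb2->l3mdev = tb->l3mdev;
            tb2->port = tb->port;
    #if IS_ENABLED(CONFIG_IPV6)                    /* guard assumed */
            if (sk->sk_family == AF_INET6) {       /* condition assumed */
                    tb2->addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
                    tb2->v6_rcv_saddr = sk->sk_v6_rcv_saddr;
            } else {
                    tb2->addr_type = IPV6_ADDR_MAPPED;
                    ipv6_addr_set_v4mapped(sk->sk_rcv_saddr, &tb2->v6_rcv_saddr);
            }
    #else
            tb2->rcv_saddr = sk->sk_rcv_saddr;
    #endif
            INIT_HLIST_HEAD(&tb2->owners);         /* completion assumed */
    }
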
inet_connection_sock.c
221 const struct inet_bind2_bucket *tb2,
228 sk_for_each_bound(sk2, &tb2->owners) {
241 /* This should be called only when the tb and tb2 hashbuckets' locks are held */
244 const struct inet_bind2_bucket *tb2, /* may be null */
264 return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax,
269 * in tb->owners and tb2->owners list belong
272 sk_for_each_bound_bhash(sk2, tb2, tb) {
296 struct inet_bind2_bucket *tb2; local
310 inet_bind_bucket_for_each(tb2,
220 inet_bhash2_conflict(const struct sock *sk, const struct inet_bind2_bucket *tb2, kuid_t sk_uid, bool relax, bool reuseport_cb_ok, bool reuseport_ok) argument
242 inet_csk_bind_conflict(const struct sock *sk, const struct inet_bind_bucket *tb, const struct inet_bind2_bucket *tb2, bool relax, bool reuseport_ok) argument
339 struct inet_bind2_bucket *tb2; local
523 struct inet_bind2_bucket *tb2 = NULL; local
[all...]
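
The inet_connection_sock.c hits show the other half of the picture: when a bind2 bucket for the requested address already exists, bind conflict detection only walks that bucket's owners instead of every socket on the port (line 264), and per the comment at line 241 both the tb and tb2 bucket locks must be held by the caller. A rough, hedged shape of that walk follows; bound_sockets_conflict() is a hypothetical stand-in for the per-pair test, and the bool return type is assumed.

    static bool inet_bhash2_conflict(const struct sock *sk,
                                     const struct inet_bind2_bucket *tb2,
                                     kuid_t sk_uid, bool relax,
                                     bool reuseport_cb_ok, bool reuseport_ok)
    {
            struct sock *sk2;

            sk_for_each_bound(sk2, &tb2->owners) {
                    /* address/uid/SO_REUSEADDR/SO_REUSEPORT comparison;
                     * bound_sockets_conflict() is a placeholder name. */
                    if (bound_sockets_conflict(sk, sk2, sk_uid, relax,
                                               reuseport_cb_ok, reuseport_ok))
                            return true;
            }
            return false;
    }
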
inet_timewait_sock.c
32 struct inet_bind2_bucket *tb2 = tw->tw_tb2; local
41 inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
inet_diag.c
1106 struct inet_bind2_bucket *tb2; local
1121 inet_bind_bucket_for_each(tb2, &ibb->chain) {
1122 if (!net_eq(ib2_net(tb2), net))
1125 sk_for_each_bound(sk, &tb2->owners) {
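
The inet_diag.c hit reuses the same structures for dumping: lines 1121-1125 walk every bind2 bucket in a hash chain, skip buckets from other network namespaces, then visit each bound socket. Reassembled as a fragment (the declarations and the per-socket body are not part of the results):

    inet_bind_bucket_for_each(tb2, &ibb->chain) {
            if (!net_eq(ib2_net(tb2), net))
                    continue;
            sk_for_each_bound(sk, &tb2->owners) {
                    /* filter/dump sk here (not shown in the results) */
            }
    }
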
/linux-master/tools/power/x86/intel-speed-select/
hfi-events.c
136 struct nlattr *tb2[CTRL_ATTR_MCAST_GRP_MAX + 1]; local
137 nla_parse(tb2, CTRL_ATTR_MCAST_GRP_MAX, nla_data(mcgrp),
139 if (!tb2[CTRL_ATTR_MCAST_GRP_NAME] ||
140 !tb2[CTRL_ATTR_MCAST_GRP_ID] ||
141 strncmp(nla_data(tb2[CTRL_ATTR_MCAST_GRP_NAME]),
143 nla_len(tb2[CTRL_ATTR_MCAST_GRP_NAME])) != 0)
145 res->id = nla_get_u32(tb2[CTRL_ATTR_MCAST_GRP_ID]);
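
Outside the networking core, tb2 is just the conventional name for a second netlink attribute table. The intel-speed-select hit re-parses a nested multicast-group attribute and matches the group name before reading the numeric id. A self-contained sketch of that pattern using libnl-3 (needs <netlink/attr.h>, <linux/genetlink.h> and <string.h>); the function name, the group_name parameter and the -1 fallback are illustrative, not from the file.

    /* Look up a generic-netlink multicast group id by name, following the
     * pattern at hfi-events.c lines 136-145. */
    static int mcast_group_id(struct nlattr *mcgrp, const char *group_name)
    {
            struct nlattr *tb2[CTRL_ATTR_MCAST_GRP_MAX + 1];

            nla_parse(tb2, CTRL_ATTR_MCAST_GRP_MAX, nla_data(mcgrp),
                      nla_len(mcgrp), NULL);
            if (!tb2[CTRL_ATTR_MCAST_GRP_NAME] ||
                !tb2[CTRL_ATTR_MCAST_GRP_ID] ||
                strncmp(nla_data(tb2[CTRL_ATTR_MCAST_GRP_NAME]), group_name,
                        nla_len(tb2[CTRL_ATTR_MCAST_GRP_NAME])) != 0)
                    return -1;
            return (int)nla_get_u32(tb2[CTRL_ATTR_MCAST_GRP_ID]);
    }
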
/linux-master/arch/powerpc/include/asm/
imc-pmu.h
84 __be64 tb2; member in struct:trace_imc_data
/linux-master/drivers/net/wireless/ath/wil6210/
cfg80211.c
2998 struct nlattr *tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1]; local
3048 rc = nla_parse_nested_deprecated(tb2,
3058 if (!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] ||
3059 !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] ||
3060 !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] ||
3061 !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] ||
3062 !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] ||
3063 !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] ||
3064 !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]) {
3070 tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDE
[all...]
/linux-master/net/sched/
act_ife.c
488 struct nlattr *tb2[IFE_META_MAX + 1]; local
528 err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
535 err = load_metalist(tb2, !(flags & TCA_ACT_FLAGS_NO_RTNL));
603 err = populate_metalist(ife, tb2, exists,
act_api.c
2139 struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1]; local
2152 if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], tcf_action_policy, NULL) < 0)
2154 kind = tb2[TCA_ACT_KIND];
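
The cfg80211.c, act_ife.c and act_api.c hits are the in-kernel version of the same idea: a nested attribute is expanded into a second table with nla_parse_nested_deprecated() and individual fields are picked out of it. A minimal fragment based on the act_api.c lines above, with the error path assumed:

    struct nlattr *tb2[TCA_ACT_MAX + 1], *kind;

    if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1],
                                    tcf_action_policy, NULL) < 0)
            return 0;                       /* error handling assumed */
    kind = tb2[TCA_ACT_KIND];
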
/linux-master/include/net/
inet_hashtables.h
287 struct inet_bind2_bucket *tb2, unsigned short port);
/linux-master/arch/powerpc/perf/
imc-pmu.c
1290 be64_to_cpu(READ_ONCE(mem->tb2)))
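
The two powerpc hits belong together: trace_imc_data keeps its second timebase sample as a big-endian field (imc-pmu.h:84), and the perf driver snapshots it with READ_ONCE() before converting it to CPU byte order (imc-pmu.c:1290). A minimal sketch; the helper name is hypothetical, the real code does this inline.

    static inline u64 trace_imc_tb2(const struct trace_imc_data *mem)
    {
            return be64_to_cpu(READ_ONCE(mem->tb2));
    }
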

Completed in 252 milliseconds