Searched refs:tb (Results 151 - 175 of 473) sorted by relevance


/linux-master/net/bridge/
br_netlink.c
930 static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[], argument
933 if (!tb[attrtype])
936 if (nla_get_u8(tb[attrtype]))
943 static int br_setport(struct net_bridge_port *p, struct nlattr *tb[], argument
953 br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
954 br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
955 br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE,
957 br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
958 br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
959 br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOO
1089 struct nlattr *tb[IFLA_BRPORT_MAX + 1]; local
1171 br_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) argument
1200 br_port_slave_changelink(struct net_device *brdev, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) argument
1275 br_changelink(struct net_device *brdev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) argument
1556 br_dev_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) argument
[all...]
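
The bridge hits above, and most of the netlink hits that follow, share one idiom: a nested attribute is parsed into a table indexed by attribute type, and each entry is then tested with "if (tb[TYPE])" before it is read. A minimal sketch of that pattern, assuming a hypothetical EXAMPLE_ATTR_* attribute set and policy (none of this code comes from the files listed here):

#include <linux/errno.h>
#include <net/netlink.h>

enum {
        EXAMPLE_ATTR_UNSPEC,
        EXAMPLE_ATTR_LIMIT,             /* u32, mandatory in this sketch */
        EXAMPLE_ATTR_FLAGS,             /* u32, optional */
        __EXAMPLE_ATTR_MAX,
};
#define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
        [EXAMPLE_ATTR_LIMIT]    = { .type = NLA_U32 },
        [EXAMPLE_ATTR_FLAGS]    = { .type = NLA_U32 },
};

static int example_parse(const struct nlattr *opt, u32 *limit, u32 *flags,
                         struct netlink_ext_ack *extack)
{
        struct nlattr *tb[EXAMPLE_ATTR_MAX + 1];
        int err;

        /* fill tb[] from the nested attribute, validating against the policy */
        err = nla_parse_nested(tb, EXAMPLE_ATTR_MAX, opt, example_policy, extack);
        if (err < 0)
                return err;

        if (!tb[EXAMPLE_ATTR_LIMIT])            /* mandatory attribute missing */
                return -EINVAL;
        *limit = nla_get_u32(tb[EXAMPLE_ATTR_LIMIT]);

        if (tb[EXAMPLE_ATTR_FLAGS])             /* optional attribute */
                *flags = nla_get_u32(tb[EXAMPLE_ATTR_FLAGS]);

        return 0;
}

The deprecated variants seen in several hits (nla_parse_nested_deprecated, nlmsg_parse_deprecated) follow the same shape but keep the older, laxer validation rules for established UAPI.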
/linux-master/net/core/
lwt_bpf.c
335 struct nlattr *tb[LWT_BPF_PROG_MAX + 1]; local
340 ret = nla_parse_nested_deprecated(tb, LWT_BPF_PROG_MAX, attr,
345 if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
348 prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
352 fd = nla_get_u32(tb[LWT_BPF_PROG_FD]);
374 struct nlattr *tb[LWT_BPF_MAX + 1]; local
382 ret = nla_parse_nested_deprecated(tb, LWT_BPF_MAX, nla, bpf_nl_policy,
387 if (!tb[LWT_BPF_IN] && !tb[LWT_BPF_OU
[all...]
net_namespace.c
776 struct nlattr *tb[NETNSA_MAX + 1]; local
781 err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
785 if (!tb[NETNSA_NSID]) {
789 nsid = nla_get_s32(tb[NETNSA_NSID]);
791 if (tb[NETNSA_PID]) {
792 peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
793 nla = tb[NETNSA_PID];
794 } else if (tb[NETNSA_FD]) {
795 peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
796 nla = tb[NETNSA_F
879 rtnl_net_valid_getid_req(struct sk_buff *skb, const struct nlmsghdr *nlh, struct nlattr **tb, struct netlink_ext_ack *extack) argument
920 struct nlattr *tb[NETNSA_MAX + 1]; local
1029 struct nlattr *tb[NETNSA_MAX + 1]; local
[all...]
/linux-master/net/sched/
cls_route.c
386 struct nlattr **tb, struct nlattr *est, int new,
395 err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
399 if (tb[TCA_ROUTE4_TO]) {
404 to = nla_get_u32(tb[TCA_ROUTE4_TO]);
408 if (tb[TCA_ROUTE4_FROM] && tb[TCA_ROUTE4_IIF]) {
409 NL_SET_ERR_MSG_ATTR(extack, tb[TCA_ROUTE4_FROM],
414 if (tb[TCA_ROUTE4_FROM]) {
415 id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
417 } else if (tb[TCA_ROUTE4_II
383 route4_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, struct route4_filter *f, u32 handle, struct route4_head *head, struct nlattr **tb, struct nlattr *est, int new, u32 flags, struct netlink_ext_ack *extack) argument
485 struct nlattr *tb[TCA_ROUTE4_MAX + 1]; local
[all...]
sch_red.c
234 static int __red_change(struct Qdisc *sch, struct nlattr **tb, argument
247 if (tb[TCA_RED_PARMS] == NULL ||
248 tb[TCA_RED_STAB] == NULL)
251 max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
253 ctl = nla_data(tb[TCA_RED_PARMS]);
254 stab = nla_data(tb[TCA_RED_STAB]);
260 tb[TCA_RED_FLAGS], TC_RED_SUPPORTED_FLAGS,
339 struct nlattr *tb[TCA_RED_MAX + 1]; local
349 err = nla_parse_nested_deprecated(tb, TCA_RED_MA
373 struct nlattr *tb[TCA_RED_MAX + 1]; local
[all...]
sch_pie.c
142 struct nlattr *tb[TCA_PIE_MAX + 1]; local
146 err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
154 if (tb[TCA_PIE_TARGET]) {
156 u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);
164 if (tb[TCA_PIE_TUPDATE])
166 usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE])));
168 if (tb[TCA_PIE_LIMIT]) {
169 u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);
175 if (tb[TCA_PIE_ALPHA])
176 WRITE_ONCE(q->params.alpha, nla_get_u32(tb[TCA_PIE_ALPH
[all...]
act_simple.c
95 struct nlattr *tb[TCA_DEF_MAX + 1]; local
106 err = nla_parse_nested_deprecated(tb, TCA_DEF_MAX, nla, simple_policy,
111 if (tb[TCA_DEF_PARMS] == NULL)
114 parm = nla_data(tb[TCA_DEF_PARMS]);
123 if (tb[TCA_DEF_DATA] == NULL) {
145 err = alloc_defdata(d, tb[TCA_DEF_DATA]);
157 err = reset_policy(*a, tb[TCA_DEF_DATA], parm, tp, extack);
cls_flow.c
398 struct nlattr *tb[TCA_FLOW_MAX + 1]; local
409 err = nla_parse_nested_deprecated(tb, TCA_FLOW_MAX, opt, flow_policy,
414 if (tb[TCA_FLOW_BASECLASS]) {
415 baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
420 if (tb[TCA_FLOW_KEYS]) {
421 keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);
439 err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
447 err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, flags,
473 if (tb[TCA_FLOW_MODE])
474 mode = nla_get_u32(tb[TCA_FLOW_MOD
[all...]
sch_fq.c
997 struct nlattr *tb[TCA_FQ_MAX + 1]; local
1002 err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
1011 if (tb[TCA_FQ_BUCKETS_LOG]) {
1012 u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);
1019 if (tb[TCA_FQ_PLIMIT])
1021 nla_get_u32(tb[TCA_FQ_PLIMIT]));
1023 if (tb[TCA_FQ_FLOW_PLIMIT])
1025 nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]));
1027 if (tb[TCA_FQ_QUANTUM]) {
1028 u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTU
[all...]
/linux-master/net/dcb/
dcbnl.c
242 u32 seq, struct nlattr **tb, struct sk_buff *skb)
244 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
253 u32 seq, struct nlattr **tb, struct sk_buff *skb)
261 if (!tb[DCB_ATTR_PFC_CFG])
268 tb[DCB_ATTR_PFC_CFG],
298 u32 seq, struct nlattr **tb, struct sk_buff *skb)
312 u32 seq, struct nlattr **tb, struct sk_buff *skb)
320 if (!tb[DCB_ATTR_CAP])
327 tb[DCB_ATTR_CAP], dcbnl_cap_nest,
357 u32 seq, struct nlattr **tb, struc
241 dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
252 dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
297 dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
311 dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
356 dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
403 dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
437 dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
447 dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
465 dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
535 dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
585 __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, struct nlattr **tb, struct sk_buff *skb, int dir) argument
721 dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
727 dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
733 dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
750 dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
781 dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
799 __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb, int dir) argument
892 dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
898 dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
904 dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
966 dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
1560 dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
1696 dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
1707 dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
1750 dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
1760 dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
1777 dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
1823 dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
1861 dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) argument
1917 struct nlattr *tb[DCB_ATTR_MAX + 1]; local
[all...]
/linux-master/net/ipv6/
ioam6_iptunnel.c
107 struct nlattr *tb[IOAM6_IPTUNNEL_MAX + 1]; local
119 err = nla_parse_nested(tb, IOAM6_IPTUNNEL_MAX, nla,
124 if ((!tb[IOAM6_IPTUNNEL_FREQ_K] && tb[IOAM6_IPTUNNEL_FREQ_N]) ||
125 (tb[IOAM6_IPTUNNEL_FREQ_K] && !tb[IOAM6_IPTUNNEL_FREQ_N])) {
128 } else if (!tb[IOAM6_IPTUNNEL_FREQ_K] && !tb[IOAM6_IPTUNNEL_FREQ_N]) {
132 freq_k = nla_get_u32(tb[IOAM6_IPTUNNEL_FREQ_K]);
133 freq_n = nla_get_u32(tb[IOAM6_IPTUNNEL_FREQ_
[all...]
/linux-master/net/ethtool/
channels.c
116 struct nlattr **tb = info->attrs; local
125 ethnl_update_u32(&channels.rx_count, tb[ETHTOOL_A_CHANNELS_RX_COUNT],
127 ethnl_update_u32(&channels.tx_count, tb[ETHTOOL_A_CHANNELS_TX_COUNT],
130 tb[ETHTOOL_A_CHANNELS_OTHER_COUNT], &mod);
132 tb[ETHTOOL_A_CHANNELS_COMBINED_COUNT], &mod_combined);
149 NL_SET_ERR_MSG_ATTR(info->extack, tb[err_attr],
164 NL_SET_ERR_MSG_ATTR(info->extack, tb[err_attr],
mm.c
188 struct nlattr **tb = info->attrs; local
199 ethnl_update_bool(&cfg.verify_enabled, tb[ETHTOOL_A_MM_VERIFY_ENABLED],
201 ethnl_update_u32(&cfg.verify_time, tb[ETHTOOL_A_MM_VERIFY_TIME], &mod);
202 ethnl_update_bool(&cfg.tx_enabled, tb[ETHTOOL_A_MM_TX_ENABLED], &mod);
203 ethnl_update_bool(&cfg.pmac_enabled, tb[ETHTOOL_A_MM_PMAC_ENABLED],
206 tb[ETHTOOL_A_MM_TX_MIN_FRAG_SIZE], &mod);
212 NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_MM_VERIFY_TIME],
pause.c
31 struct nlattr **tb,
37 if (tb[ETHTOOL_A_PAUSE_STATS_SRC]) {
44 src = nla_get_u32(tb[ETHTOOL_A_PAUSE_STATS_SRC]);
187 struct nlattr **tb = info->attrs; local
193 ethnl_update_bool32(&params.autoneg, tb[ETHTOOL_A_PAUSE_AUTONEG], &mod);
194 ethnl_update_bool32(&params.rx_pause, tb[ETHTOOL_A_PAUSE_RX], &mod);
195 ethnl_update_bool32(&params.tx_pause, tb[ETHTOOL_A_PAUSE_TX], &mod);
30 pause_parse_request(struct ethnl_req_info *req_base, struct nlattr **tb, struct netlink_ext_ack *extack) argument
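
The ethtool hits (channels.c, mm.c, pause.c) use a slightly different flavour of the same table: tb comes straight from info->attrs, and small ethnl_update_* helpers overwrite a field only when the corresponding attribute is present, recording whether anything actually changed. A self-contained sketch of that behaviour, with example_update_u32() standing in for the internal helper (an illustrative assumption, not the ethtool code itself):

#include <linux/types.h>
#include <net/netlink.h>

/* Overwrite *dest from a u32 attribute if the attribute was supplied
 * and carries a different value; set *mod so the caller knows whether
 * it needs to push the new configuration to the driver. */
static void example_update_u32(u32 *dest, const struct nlattr *attr, bool *mod)
{
        u32 val;

        if (!attr)
                return;                 /* attribute absent: keep current value */
        val = nla_get_u32(attr);
        if (*dest == val)
                return;                 /* no effective change */
        *dest = val;
        *mod = true;
}

The typical caller reads the current settings from the driver, overlays every requested attribute this way, and only calls back into the driver when mod ended up true.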
/linux-master/net/netfilter/
nft_redir.c
46 const struct nlattr * const tb[])
53 if (tb[NFTA_REDIR_REG_PROTO_MIN]) {
54 err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MIN],
59 if (tb[NFTA_REDIR_REG_PROTO_MAX]) {
60 err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MAX],
72 if (tb[NFTA_REDIR_FLAGS])
73 priv->flags = ntohl(nla_get_be32(tb[NFTA_REDIR_FLAGS]));
44 nft_redir_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) argument
nft_masq.c
45 const struct nlattr * const tb[])
51 if (tb[NFTA_MASQ_FLAGS])
52 priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS]));
54 if (tb[NFTA_MASQ_REG_PROTO_MIN]) {
55 err = nft_parse_register_load(tb[NFTA_MASQ_REG_PROTO_MIN],
60 if (tb[NFTA_MASQ_REG_PROTO_MAX]) {
61 err = nft_parse_register_load(tb[NFTA_MASQ_REG_PROTO_MAX],
43 nft_masq_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) argument
nft_socket.c
148 const struct nlattr * const tb[])
153 if (!tb[NFTA_SOCKET_DREG] || !tb[NFTA_SOCKET_KEY])
167 priv->key = ntohl(nla_get_be32(tb[NFTA_SOCKET_KEY]));
180 if (!tb[NFTA_SOCKET_LEVEL])
183 level = ntohl(nla_get_be32(tb[NFTA_SOCKET_LEVEL]));
197 return nft_parse_register_store(ctx, tb[NFTA_SOCKET_DREG], &priv->dreg,
146 nft_socket_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) argument
nft_xfrm.c
35 const struct nlattr * const tb[])
42 if (!tb[NFTA_XFRM_KEY] || !tb[NFTA_XFRM_DIR] || !tb[NFTA_XFRM_DREG])
54 priv->key = ntohl(nla_get_be32(tb[NFTA_XFRM_KEY]));
72 dir = nla_get_u8(tb[NFTA_XFRM_DIR]);
82 if (tb[NFTA_XFRM_SPNUM])
83 spnum = ntohl(nla_get_be32(tb[NFTA_XFRM_SPNUM]));
91 return nft_parse_register_store(ctx, tb[NFTA_XFRM_DREG], &priv->dreg,
33 nft_xfrm_get_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) argument
nft_connlimit.c
60 const struct nlattr * const tb[],
67 if (!tb[NFTA_CONNLIMIT_COUNT])
70 limit = ntohl(nla_get_be32(tb[NFTA_CONNLIMIT_COUNT]));
72 if (tb[NFTA_CONNLIMIT_FLAGS]) {
73 flags = ntohl(nla_get_be32(tb[NFTA_CONNLIMIT_FLAGS]));
132 const struct nlattr * const tb[],
137 return nft_connlimit_do_init(ctx, tb, priv);
198 const struct nlattr * const tb[])
202 return nft_connlimit_do_init(ctx, tb, priv);
59 nft_connlimit_do_init(const struct nft_ctx *ctx, const struct nlattr * const tb[], struct nft_connlimit *priv) argument
131 nft_connlimit_obj_init(const struct nft_ctx *ctx, const struct nlattr * const tb[], struct nft_object *obj) argument
196 nft_connlimit_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) argument
nft_dup_netdev.c
36 const struct nlattr * const tb[])
40 if (tb[NFTA_DUP_SREG_DEV] == NULL)
43 return nft_parse_register_load(tb[NFTA_DUP_SREG_DEV], &priv->sreg_dev,
34 nft_dup_netdev_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) argument
/linux-master/net/mpls/
mpls_iptunnel.c
169 struct nlattr *tb[MPLS_IPTUNNEL_MAX + 1]; local
174 ret = nla_parse_nested_deprecated(tb, MPLS_IPTUNNEL_MAX, nla,
179 if (!tb[MPLS_IPTUNNEL_DST]) {
185 if (nla_get_labels(tb[MPLS_IPTUNNEL_DST], MAX_NEW_LABELS,
195 ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], n_labels,
203 if (tb[MPLS_IPTUNNEL_TTL]) {
204 tun_encap_info->default_ttl = nla_get_u8(tb[MPLS_IPTUNNEL_TTL]);
/linux-master/include/linux/netfilter/ipset/
ip_set.h
163 int (*uadt)(struct ip_set *set, struct nlattr *tb[],
231 struct nlattr *tb[], u32 flags);
335 extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
337 extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
377 ip_set_attr_netorder(struct nlattr *tb[], int type) argument
379 return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
383 ip_set_optattr_netorder(struct nlattr *tb[], int type) argument
385 return !tb[type] || (tb[typ
468 ip_set_timeout_uget(struct nlattr *tb) argument
[all...]
/linux-master/drivers/thunderbolt/
retimer.c
15 #include "tb.h"
43 if (!mutex_trylock(&rt->tb->lock)) {
49 mutex_unlock(&rt->tb->lock);
64 if (!mutex_trylock(&rt->tb->lock))
68 mutex_unlock(&rt->tb->lock);
176 if (!mutex_trylock(&rt->tb->lock))
186 mutex_unlock(&rt->tb->lock);
253 if (!mutex_trylock(&rt->tb->lock)) {
300 mutex_unlock(&rt->tb->lock);
317 if (!mutex_trylock(&rt->tb
[all...]
/linux-master/arch/sparc/kernel/
irq_64.c
1001 struct trap_per_cpu *tb = &trap_block[this_cpu]; local
1003 register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
1004 tb->cpu_mondo_qmask);
1005 register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
1006 tb->dev_mondo_qmask);
1007 register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
1008 tb->resum_qmask);
1009 register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
1010 tb->nonresum_qmask);
1032 static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb) argument
1065 struct trap_per_cpu *tb = &trap_block[cpu]; local
1082 struct trap_per_cpu *tb = &trap_block[cpu]; local
[all...]
/linux-master/drivers/net/wireless/ath/ath12k/
wmi.c
349 const void **tb = data; local
352 tb[tag] = ptr;
357 static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb, argument
361 (void *)tb);
368 const void **tb; local
371 tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
372 if (!tb)
375 ret = ath12k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
377 kfree(tb);
4586 const void **tb; local
4657 const void **tb; local
4950 const void **tb; local
4982 const void **tb; local
5010 const void **tb; local
5038 const void **tb; local
5182 const void **tb; local
5363 const void **tb; local
5396 const void **tb; local
5423 const void **tb; local
5475 const void **tb; local
5514 const void **tb; local
5554 const void **tb; local
5585 const void **tb; local
5614 const void **tb; local
6617 const void **tb; local
6681 const void **tb; local
6715 const void **tb; local
6795 const void **tb; local
6825 const void **tb; local
6857 const void **tb; local
6908 const void **tb; local
6947 const void **tb; local
6976 const void **tb; local
[all...]
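
The ath12k hits use tb differently: here it is not a netlink attribute table but a tag-indexed array of pointers into a WMI TLV stream, allocated with kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp) and filled by a TLV iterator. A rough, self-contained sketch of that idea with a hypothetical TLV layout and EXAMPLE_TAG_MAX; it skips the stricter per-tag validation the real iterator performs:

#include <linux/types.h>
#include <linux/errno.h>

#define EXAMPLE_TAG_MAX 16              /* stand-in for WMI_TAG_MAX */

struct example_tlv {
        u32 tag;                        /* identifies the element */
        u32 len;                        /* length of value[] in bytes */
        u8 value[];
};

/* Walk the TLV stream and remember where each tag's payload starts.
 * tb must have EXAMPLE_TAG_MAX entries, zeroed by the caller
 * (e.g. kcalloc(EXAMPLE_TAG_MAX, sizeof(*tb), GFP_KERNEL)). */
static int example_tlv_parse(const void **tb, const void *data, size_t len)
{
        const u8 *ptr = data;

        while (len >= sizeof(struct example_tlv)) {
                const struct example_tlv *tlv = (const void *)ptr;
                size_t total = sizeof(*tlv) + tlv->len;

                if (total > len)
                        return -EINVAL;         /* truncated element */
                if (tlv->tag < EXAMPLE_TAG_MAX)
                        tb[tlv->tag] = tlv->value;      /* last occurrence wins */
                ptr += total;
                len -= total;
        }
        return 0;
}

After a successful parse, event handlers simply index the table by tag, which is the pattern behind the many "const void **tb; local" declarations above.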

Completed in 177 milliseconds
