Lines matching refs:tm in net/ipv4/tcp_metrics.c

Each entry below is a line from tcp_metrics.c, prefixed with its source line number, on which the identifier tm (a struct tcp_metrics_block pointer) appears. Illustrative userspace sketches of the recurring patterns follow each group of hits.

54 static inline struct net *tm_net(const struct tcp_metrics_block *tm)
57 return READ_ONCE(tm->tcpm_net);
60 static bool tcp_metric_locked(struct tcp_metrics_block *tm,
64 return READ_ONCE(tm->tcpm_lock) & (1 << idx);
67 static u32 tcp_metric_get(const struct tcp_metrics_block *tm,
71 return READ_ONCE(tm->tcpm_vals[idx]);
74 static void tcp_metric_set(struct tcp_metrics_block *tm,
79 WRITE_ONCE(tm->tcpm_vals[idx], val);
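The four accessors above (tm_net(), tcp_metric_locked(), tcp_metric_get(), tcp_metric_set()) wrap every field access in READ_ONCE()/WRITE_ONCE() so readers can walk the cache without taking the hash-bucket lock. A minimal userspace sketch of the same pattern, with C11 relaxed atomics standing in for the kernel annotations; the struct layout is illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define METRIC_MAX 5                    /* assumed slot count for the demo */

struct metrics_block {
        _Atomic uint32_t lock;          /* bitmask: one "locked" bit per metric */
        _Atomic uint32_t vals[METRIC_MAX];
};

static bool metric_locked(struct metrics_block *m, unsigned int idx)
{
        /* Like tcp_metric_locked(): test a single bit of the lock word. */
        return atomic_load_explicit(&m->lock, memory_order_relaxed) & (1u << idx);
}

static uint32_t metric_get(struct metrics_block *m, unsigned int idx)
{
        /* READ_ONCE() analogue: a tearing-free, unordered load. */
        return atomic_load_explicit(&m->vals[idx], memory_order_relaxed);
}

static void metric_set(struct metrics_block *m, unsigned int idx, uint32_t val)
{
        /* WRITE_ONCE() analogue: a tearing-free, unordered store. */
        atomic_store_explicit(&m->vals[idx], val, memory_order_relaxed);
}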
98 static void tcpm_suck_dst(struct tcp_metrics_block *tm,
105 WRITE_ONCE(tm->tcpm_stamp, jiffies);
119 WRITE_ONCE(tm->tcpm_lock, val);
122 tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC);
125 tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC);
126 tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
128 tcp_metric_set(tm, TCP_METRIC_CWND,
130 tcp_metric_set(tm, TCP_METRIC_REORDERING,
134 tm->tcpm_fastopen.mss = 0;
135 tm->tcpm_fastopen.syn_loss = 0;
136 tm->tcpm_fastopen.try_exp = 0;
137 tm->tcpm_fastopen.cookie.exp = false;
138 tm->tcpm_fastopen.cookie.len = 0;
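tcpm_suck_dst() re-primes a block from the route's cached metrics: it stamps the entry with jiffies, rebuilds the per-metric lock bitmask, converts the route's millisecond RTT/RTTVAR into microseconds, and, when asked, wipes the Fast Open state (lines 134-138 above). A compact sketch of that refresh under illustrative types:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define USEC_PER_MSEC 1000u

struct fastopen_metrics {
        uint16_t mss;
        uint8_t  syn_loss;
        uint8_t  try_exp;
        struct { bool exp; uint8_t len; uint8_t val[16]; } cookie;
};

struct metrics {
        uint32_t rtt_us;
        uint32_t rttvar_us;
        struct fastopen_metrics fastopen;
};

static void metrics_refresh(struct metrics *m, uint32_t route_rtt_ms,
                            uint32_t route_rttvar_ms, bool fastopen_clear)
{
        /* Route metrics are stored in ms; the cache keeps usec. */
        m->rtt_us    = route_rtt_ms    * USEC_PER_MSEC;
        m->rttvar_us = route_rttvar_ms * USEC_PER_MSEC;
        if (fastopen_clear)             /* mss, syn_loss, try_exp, cookie */
                memset(&m->fastopen, 0, sizeof(m->fastopen));
}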
145 static void tcpm_check_stamp(struct tcp_metrics_block *tm,
150 if (!tm)
152 limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT;
154 tcpm_suck_dst(tm, dst, false);
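tcpm_check_stamp() only triggers that refresh once an entry has aged past TCP_METRICS_TIMEOUT. A userspace analogue of the staleness test, with time() standing in for jiffies and an assumed one-hour timeout:

#include <stdbool.h>
#include <time.h>

#define TIMEOUT_SECS (60 * 60)          /* assumption: stands in for TCP_METRICS_TIMEOUT */

struct entry {
        time_t stamp;                   /* last refresh, like tcpm_stamp */
};

static bool entry_stale(const struct entry *e)
{
        /* A NULL entry never reports stale, matching the "if (!tm)" guard. */
        return e && time(NULL) > e->stamp + TIMEOUT_SECS;
}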
168 struct tcp_metrics_block *tm;
178 tm = __tcp_get_metrics(saddr, daddr, net, hash);
179 if (tm == TCP_METRICS_RECLAIM_PTR) {
181 tm = NULL;
183 if (tm) {
184 tcpm_check_stamp(tm, dst);
192 for (tm = deref_locked(oldest->tcpm_next); tm;
193 tm = deref_locked(tm->tcpm_next)) {
194 if (time_before(READ_ONCE(tm->tcpm_stamp),
196 oldest = tm;
198 tm = oldest;
200 tm = kzalloc(sizeof(*tm), GFP_ATOMIC);
201 if (!tm)
205 WRITE_ONCE(tm->tcpm_net, net);
207 tm->tcpm_saddr = *saddr;
208 tm->tcpm_daddr = *daddr;
210 tcpm_suck_dst(tm, dst, reclaim);
213 tm->tcpm_next = tcp_metrics_hash[hash].chain;
214 rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
219 return tm;
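tcpm_new() either reclaims the oldest block on a full chain (the time_before() scan above) or kzalloc()s a fresh one, fills in the addresses and metrics first, and only then publishes it at the chain head via rcu_assign_pointer(). A userspace sketch of the publish step, using a C11 release store as the analogue of rcu_assign_pointer(); types and addressing are illustrative:

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

struct block {
        struct block *_Atomic next;
        uint32_t saddr, daddr;          /* stand-ins for the inetpeer addresses */
};

struct bucket {
        struct block *_Atomic chain;
};

static struct block *chain_insert(struct bucket *b, uint32_t saddr,
                                  uint32_t daddr)
{
        struct block *nb = calloc(1, sizeof(*nb)); /* kzalloc() analogue */

        if (!nb)
                return NULL;
        nb->saddr = saddr;
        nb->daddr = daddr;
        /* The old head becomes our next; concurrent readers keep walking. */
        atomic_store_explicit(&nb->next,
                              atomic_load_explicit(&b->chain, memory_order_relaxed),
                              memory_order_relaxed);
        /* Release store: the block is fully initialised before it is visible. */
        atomic_store_explicit(&b->chain, nb, memory_order_release);
        return nb;
}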
222 static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
224 if (tm)
225 return tm;
235 struct tcp_metrics_block *tm;
238 for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
239 tm = rcu_dereference(tm->tcpm_next)) {
240 if (addr_same(&tm->tcpm_saddr, saddr) &&
241 addr_same(&tm->tcpm_daddr, daddr) &&
242 net_eq(tm_net(tm), net))
246 return tcp_get_encode(tm, depth);
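The lookup in __tcp_get_metrics() walks the RCU-protected chain with rcu_dereference() and matches on source address, destination address, and network namespace. The same walk sketched with acquire loads in place of rcu_dereference(); netns matching is omitted and the key type is illustrative:

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

struct block {
        struct block *_Atomic next;
        uint32_t saddr, daddr;
};

static struct block *chain_lookup(struct block *_Atomic *head,
                                  uint32_t saddr, uint32_t daddr)
{
        for (struct block *b = atomic_load_explicit(head, memory_order_acquire);
             b;
             b = atomic_load_explicit(&b->next, memory_order_acquire)) {
                if (b->saddr == saddr && b->daddr == daddr)
                        return b;       /* first match on the (saddr, daddr) key */
        }
        return NULL;
}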
252 struct tcp_metrics_block *tm;
280 for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
281 tm = rcu_dereference(tm->tcpm_next)) {
282 if (addr_same(&tm->tcpm_saddr, &saddr) &&
283 addr_same(&tm->tcpm_daddr, &daddr) &&
284 net_eq(tm_net(tm), net))
287 tcpm_check_stamp(tm, dst);
288 return tm;
295 struct tcp_metrics_block *tm;
325 tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
326 if (tm == TCP_METRICS_RECLAIM_PTR)
327 tm = NULL;
328 if (!tm && create)
329 tm = tcpm_new(dst, &saddr, &daddr, hash);
331 tcpm_check_stamp(tm, dst);
333 return tm;
346 struct tcp_metrics_block *tm;
361 tm = tcp_get_metrics(sk, dst, false);
362 if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
363 tcp_metric_set(tm, TCP_METRIC_RTT, 0);
366 tm = tcp_get_metrics(sk, dst, true);
368 if (!tm)
371 rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
378 if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
383 tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
386 if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
397 var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
403 tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
409 !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
410 val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
412 tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
415 if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
416 val = tcp_metric_get(tm, TCP_METRIC_CWND);
418 tcp_metric_set(tm, TCP_METRIC_CWND,
425 !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
426 tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
428 if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
429 val = tcp_metric_get(tm, TCP_METRIC_CWND);
430 tcp_metric_set(tm, TCP_METRIC_CWND, (val + tcp_snd_cwnd(tp)) >> 1);
436 if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
437 val = tcp_metric_get(tm, TCP_METRIC_CWND);
438 tcp_metric_set(tm, TCP_METRIC_CWND,
442 !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
443 val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
445 tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
448 if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
449 val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
453 tcp_metric_set(tm, TCP_METRIC_REORDERING,
457 WRITE_ONCE(tm->tcpm_stamp, jiffies);
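tcp_update_metrics() folds the finished connection's state back into the cache: the stored RTT is exponentially smoothed toward the final srtt, and cwnd/ssthresh are averaged with the final values (visible above as "(val + tcp_snd_cwnd(tp)) >> 1"). A sketch of that smoothing, assuming the 1/8 decay the function applies to RTT:

#include <stdint.h>

static uint32_t smooth_rtt(uint32_t cached_us, uint32_t sample_us)
{
        int32_t m = (int32_t)(cached_us - sample_us);

        if (m <= 0)
                return sample_us;       /* sample grew past the cache: adopt it */
        return cached_us - (uint32_t)(m >> 3); /* otherwise decay by 1/8 */
}

static uint32_t smooth_cwnd(uint32_t cached, uint32_t final_cwnd)
{
        return (cached + final_cwnd) >> 1; /* running average, as in the source */
}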
469 struct tcp_metrics_block *tm;
481 tm = tcp_get_metrics(sk, dst, false);
482 if (!tm) {
487 if (tcp_metric_locked(tm, TCP_METRIC_CWND))
488 tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
491 0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
497 val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
501 crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
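tcp_init_metrics() runs in the opposite direction, seeding a fresh connection from the cache: a locked CWND metric caps snd_cwnd_clamp, cached ssthresh and reordering are adopted when nonzero, and the cached RTT ("crtt") seeds the initial RTO. A sketch with illustrative stand-ins for the tcp_sock fields:

#include <stdbool.h>
#include <stdint.h>

struct cache {
        uint32_t cwnd, ssthresh, reordering, rtt_us;
        bool cwnd_locked;
};

struct conn {                           /* illustrative tcp_sock stand-in */
        uint32_t cwnd_clamp, ssthresh, reordering, seed_rtt_us;
};

static void conn_seed_from_cache(struct conn *c, const struct cache *m)
{
        if (m->cwnd_locked)
                c->cwnd_clamp = m->cwnd;   /* locked metric caps the clamp */
        if (m->ssthresh)
                c->ssthresh = m->ssthresh;
        if (m->reordering)
                c->reordering = m->reordering;
        c->seed_rtt_us = m->rtt_us;        /* "crtt" seeds the initial RTO */
}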
544 struct tcp_metrics_block *tm;
551 tm = __tcp_get_metrics_req(req, dst);
552 if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
564 struct tcp_metrics_block *tm;
567 tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
568 if (tm) {
569 struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
589 struct tcp_metrics_block *tm;
594 tm = tcp_get_metrics(sk, dst, true);
595 if (tm) {
596 struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
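tcp_fastopen_cache_get()/_set() read and update the tcpm_fastopen sub-struct: the negotiated MSS, the cookie, and a counter of SYNs (carrying data) that were lost. A sketch of the set side with an illustrative layout; the kernel serialises this with a seqlock, omitted here:

#include <stdint.h>

struct fo_cookie {
        uint8_t len;
        uint8_t val[16];
};

struct fo_metrics {
        uint16_t mss;
        uint8_t  syn_loss;
        struct fo_cookie cookie;
};

static void fo_cache_set(struct fo_metrics *fo, uint16_t mss,
                         const struct fo_cookie *cookie, int syn_lost)
{
        if (mss)
                fo->mss = mss;
        if (cookie && cookie->len)
                fo->cookie = *cookie;   /* remember the server-issued cookie */
        if (syn_lost)
                fo->syn_loss++;         /* consecutive SYN+data losses */
        else
                fo->syn_loss = 0;       /* a clean handshake resets the count */
}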
640 struct tcp_metrics_block *tm)
645 switch (tm->tcpm_daddr.family) {
648 inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
651 inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
656 inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
659 inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
667 jiffies - READ_ONCE(tm->tcpm_stamp),
678 u32 val = tcp_metric_get(tm, i);
712 tfom_copy[0] = tm->tcpm_fastopen;
741 struct tcp_metrics_block *tm)
751 if (tcp_metrics_fill_info(skb, tm) < 0)
772 struct tcp_metrics_block *tm;
776 for (col = 0, tm = rcu_dereference(hb->chain); tm;
777 tm = rcu_dereference(tm->tcpm_next), col++) {
778 if (!net_eq(tm_net(tm), net))
782 res = tcp_metrics_dump_info(skb, cb, tm);
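tcp_metrics_nl_dump() counts columns while walking each bucket so a dump cut short by a full skb can resume mid-chain on the next netlink call. A generic sketch of that resume pattern; emit() is a hypothetical callback that returns a negative value when the output buffer is full:

struct block {
        struct block *next;
};

/* Walk one bucket, skipping the first 'start' entries (already emitted
 * on a previous pass). Returns the column to resume from when emit()
 * reports a full buffer, or -1 once the bucket is exhausted. */
static int bucket_dump(struct block *chain, int start,
                       int (*emit)(struct block *))
{
        int col = 0;

        for (struct block *b = chain; b; b = b->next, col++) {
                if (col < start)
                        continue;
                if (emit(b) < 0)
                        return col;     /* buffer full: resume here next call */
        }
        return -1;
}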
841 struct tcp_metrics_block *tm;
871 for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
872 tm = rcu_dereference(tm->tcpm_next)) {
873 if (addr_same(&tm->tcpm_daddr, &daddr) &&
874 (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
875 net_eq(tm_net(tm), net)) {
876 ret = tcp_metrics_fill_info(msg, tm);
899 struct tcp_metrics_block *tm;
910 for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
911 match = net ? net_eq(tm_net(tm), net) :
912 !refcount_read(&tm_net(tm)->ns.count);
914 rcu_assign_pointer(*pp, tm->tcpm_next);
915 kfree_rcu(tm, rcu_head);
917 pp = &tm->tcpm_next;
928 struct tcp_metrics_block *tm;
952 for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
953 if (addr_same(&tm->tcpm_daddr, &daddr) &&
954 (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
955 net_eq(tm_net(tm), net)) {
956 rcu_assign_pointer(*pp, tm->tcpm_next);
957 kfree_rcu(tm, rcu_head);
960 pp = &tm->tcpm_next;
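tcp_metrics_flush_all() and the netlink del command share one unlink idiom: walk the chain through a pointer-to-pointer (pp) so a matching block can be spliced out in place, then defer its free with kfree_rcu() so lockless readers drain first. A userspace sketch with free() standing in for the deferred reclaim:

#include <stdint.h>
#include <stdlib.h>

struct block {
        struct block *next;
        uint32_t daddr;
};

static void chain_del(struct block **pp, uint32_t daddr)
{
        struct block *b;

        while ((b = *pp) != NULL) {
                if (b->daddr == daddr) {
                        *pp = b->next;  /* splice the match out of the chain */
                        free(b);        /* kernel: kfree_rcu(tm, rcu_head) */
                } else {
                        pp = &b->next;  /* advance the link we might rewrite */
                }
        }
}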