Lines Matching refs:peer

26  * Find the peer associated with a local error.
103 static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
106         if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu)
107                 peer->if_mtu = mtu;
111                 mtu = peer->if_mtu;
118                         if (mtu < peer->hdrsize)
119                                 mtu = peer->hdrsize + 4;
123         if (mtu < peer->mtu) {
124                 spin_lock(&peer->lock);
125                 peer->mtu = mtu;
126                 peer->maxdata = peer->mtu - peer->hdrsize;
127                 spin_unlock(&peer->lock);
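
Taken together, the matches at 103-127 describe a clamp: a reported MTU first winds the 65535 interface-MTU default down, a zero report is replaced by an estimate derived from if_mtu (the fragments at 111, 118 and 119 belong to that branch), and peer->mtu only ever shrinks, with maxdata recomputed under peer->lock. A self-contained sketch of the arithmetic in plain C follows; the struct is a simplified stand-in and the elided estimate branch is reconstructed by assumption, so treat it as illustrative rather than the file's exact code.

#include <stdio.h>

/* Simplified stand-in for struct rxrpc_peer; locking omitted. */
struct peer {
        unsigned int if_mtu;    /* interface MTU, 65535 until learned */
        unsigned int mtu;       /* current path-MTU estimate */
        unsigned int maxdata;   /* mtu minus protocol header size */
        unsigned int hdrsize;   /* transport + rxrpc header size */
};

/* Mirrors the clamp visible at lines 103-127; the mtu == 0 estimate
 * branch is an assumption fitted around the fragments at 111-119.
 */
static void adjust_mtu(struct peer *peer, unsigned int mtu)
{
        /* Wind the interface MTU down from its 65535 default. */
        if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu)
                peer->if_mtu = mtu;

        if (mtu == 0) {
                /* No size reported: estimate one from the interface MTU. */
                mtu = peer->if_mtu;
                if (mtu > 1500) {
                        mtu >>= 1;
                        if (mtu < 1500)
                                mtu = 1500;
                } else {
                        mtu -= 100;
                        if (mtu < peer->hdrsize)
                                mtu = peer->hdrsize + 4;
                }
        }

        /* The path MTU only ever shrinks in response to an error. */
        if (mtu < peer->mtu) {
                peer->mtu = mtu;
                peer->maxdata = peer->mtu - peer->hdrsize;
        }
}

int main(void)
{
        struct peer p = { .if_mtu = 65535, .mtu = 65535, .hdrsize = 60 };

        adjust_mtu(&p, 1400);   /* e.g. ICMP_FRAG_NEEDED, ee_info = 1400 */
        printf("mtu=%u maxdata=%u\n", p.mtu, p.maxdata); /* mtu=1400 maxdata=1340 */
        return 0;
}
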
138         struct rxrpc_peer *peer = NULL;
148         peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
149         if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_input_error))
150                 peer = NULL;
152         if (!peer)
155         trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
160                 rxrpc_adjust_mtu(peer, serr->ee.ee_info);
164         rxrpc_store_error(peer, skb);
166         rxrpc_put_peer(peer, rxrpc_peer_put_input_error);
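
The flow at 138-166 is the standard RCU lookup-then-pin shape: find the peer without taking a lock, pin it with a conditional refcount bump, and treat a failed bump as "no peer". A fragmentation-needed ICMP then feeds the reported MTU (serr->ee.ee_info, line 160) into rxrpc_adjust_mtu(); any other error is stored and distributed (164); the ref is dropped either way (166). A sketch of the pin step, assuming rxrpc_get_peer_maybe() wraps refcount_inc_not_zero() and that the rcu_read_lock()/rcu_read_unlock() bracketing (absent from these matches because those lines do not contain "peer") sits where shown:

        rcu_read_lock();
        peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
        if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_input_error))
                peer = NULL;    /* found, but already on its way to being freed */
        rcu_read_unlock();

        if (!peer)
                return;         /* no live peer to attach the error report to */
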
170 * Map an error report to error codes on the peer record.
172 static void rxrpc_store_error(struct rxrpc_peer *peer, struct sk_buff *skb)
196         rxrpc_distribute_error(peer, skb, compl, err);
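
Only the signature (172) and the tail call (196) of rxrpc_store_error() match here, so the mapping itself is elided. By assumption its shape is a switch on the extended error's origin: locally generated errors complete affected calls as local errors, ICMP-sourced ones as network errors, with the errno lifted from serr->ee.ee_errno. A hedged sketch; the exact cases in the file may differ:

        struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
        enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
        int err = serr->ee.ee_errno;

        switch (serr->ee.ee_origin) {
        case SO_EE_ORIGIN_NONE:
        case SO_EE_ORIGIN_LOCAL:
                compl = RXRPC_CALL_LOCAL_ERROR;
                break;
        case SO_EE_ORIGIN_ICMP:
        case SO_EE_ORIGIN_ICMP6:
        default:
                break;          /* keep RXRPC_CALL_NETWORK_ERROR */
        }

        rxrpc_distribute_error(peer, skb, compl, err);
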
200 * Distribute an error that occurred on a peer.
202 static void rxrpc_distribute_error(struct rxrpc_peer *peer, struct sk_buff *skb,
208         spin_lock(&peer->lock);
209         hlist_move_list(&peer->error_targets, &error_targets);
215                 spin_unlock(&peer->lock);
221                 spin_lock(&peer->lock);
224         spin_unlock(&peer->lock);
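
The locking at 208-224 is a hand-off pattern: the whole error_targets list is spliced onto a local list head under peer->lock, then the lock is dropped around each call's completion (215/221) so per-call processing never runs with the peer lock held. Re-taking the lock matters even for the spliced list because, presumably following the usual rxrpc convention, a dying call unlinks itself from error_link under that same lock. A sketch with the completion step reduced to a comment:

        struct rxrpc_call *call;
        HLIST_HEAD(error_targets);

        spin_lock(&peer->lock);
        hlist_move_list(&peer->error_targets, &error_targets);

        while (!hlist_empty(&error_targets)) {
                call = hlist_entry(error_targets.first,
                                   struct rxrpc_call, error_link);
                hlist_del_init(&call->error_link);
                spin_unlock(&peer->lock);

                /* complete the call with (compl, err) -- no peer->lock held */

                spin_lock(&peer->lock);
        }
        spin_unlock(&peer->lock);
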
235         struct rxrpc_peer *peer;
244                 peer = list_entry(collector->next,
247                 list_del_init(&peer->keepalive_link);
248                 if (!rxrpc_get_peer_maybe(peer, rxrpc_peer_get_keepalive))
251                 use = __rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive);
255                         keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
257                         _debug("%02x peer %u t=%d {%pISp}",
258                                cursor, peer->debug_id, slot, &peer->srx.transport);
262                                 rxrpc_send_keepalive(peer);
266                         /* A transmission to this peer occurred since last we
273                         list_add_tail(&peer->keepalive_link,
276                         rxrpc_unuse_local(peer->local, rxrpc_local_unuse_peer_keepalive);
278                 rxrpc_put_peer(peer, rxrpc_peer_put_keepalive);
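
The dispatch loop at 235-278 services one bucket of a keepalive time wheel: each peer popped off the collector is conditionally pinned (248), its next-due time computed as last_tx_at + RXRPC_KEEPALIVE_TIME (255), pinged if due (262), parked in a future bucket (273), and then the local and peer refs are released (276, 278). A self-contained sketch of the slot arithmetic, assuming a 20-second RXRPC_KEEPALIVE_TIME and a power-of-two bucket array (32 in current trees); both values are assumptions, not read from these matches:

#include <stdbool.h>
#include <stdio.h>

#define RXRPC_KEEPALIVE_TIME 20         /* assumed: seconds */
#define KEEPALIVE_BUCKETS    32         /* assumed: ARRAY_SIZE(rxnet->peer_keepalive) */

/* Compute which future bucket a peer belongs in, and whether it needs a
 * keepalive ping right now. Mirrors the arithmetic at lines 255-273.
 */
static unsigned int keepalive_slot(long long last_tx_at, long long base,
                                   unsigned int cursor, bool *ping)
{
        const unsigned int mask = KEEPALIVE_BUCKETS - 1;
        long long keepalive_at = last_tx_at + RXRPC_KEEPALIVE_TIME;
        int slot = keepalive_at - base;

        *ping = (keepalive_at <= base ||
                 keepalive_at > base + RXRPC_KEEPALIVE_TIME);
        if (*ping)
                slot = RXRPC_KEEPALIVE_TIME;    /* pinged: due again in one period */

        return (slot + cursor) & mask;          /* wrap around the wheel */
}

int main(void)
{
        bool ping;
        /* A peer that last transmitted 25s before `base`: overdue, ping it. */
        unsigned int slot = keepalive_slot(75, 100, 5, &ping);

        printf("ping=%d slot=%u\n", ping, slot);        /* ping=1 slot=25 */
        return 0;
}
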