Lines matching refs:call in net/rxrpc/sendmsg.c

23 bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
26 _enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);
28 if (!call->send_abort && !rxrpc_call_is_complete(call)) {
29 call->send_abort_why = why;
30 call->send_abort_err = error;
31 call->send_abort_seq = 0;
33 smp_store_release(&call->send_abort, abort_code);
34 rxrpc_poke_call(call, rxrpc_call_poke_abort);
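
The smp_store_release() above publishes the abort code only after send_abort_why, send_abort_err and send_abort_seq have been written. The consuming side lives in the call event code and is not part of this listing; the sketch below is an illustrative reader (not the actual consumer) showing the acquire half of that pairing, and it assumes access to the private struct rxrpc_call layout from net/rxrpc/ar-internal.h.

/* Illustrative only: once a non-zero abort code is seen via an acquire
 * load, the fields written before the release store above are guaranteed
 * to be visible. Requires net/rxrpc/ar-internal.h for the call layout. */
static void example_read_proposed_abort(struct rxrpc_call *call)
{
	s32 abort_code = smp_load_acquire(&call->send_abort);

	if (abort_code)
		pr_debug("proposed abort %d err %d why %u\n",
			 abort_code, call->send_abort_err,
			 call->send_abort_why);
}
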
42 * Wait for a call to become connected. Interruption here doesn't cause the
43 * call to be aborted.
45 static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
50 _enter("%d", call->debug_id);
52 if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
55 add_wait_queue_exclusive(&call->waitq, &myself);
58 switch (call->interruptibility) {
69 if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
71 if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
72 call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
80 remove_wait_queue(&call->waitq, &myself);
84 if (ret == 0 && rxrpc_call_is_complete(call))
85 ret = call->error;
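
Only the lines containing "call" are shown above, so most of rxrpc_wait_to_be_connected() is elided. Schematically it follows the usual exclusive-waitqueue pattern, choosing the sleep state from call->interruptibility; the following is a hedged reconstruction of the elided loop, not the verbatim source.

	add_wait_queue_exclusive(&call->waitq, &myself);
	for (;;) {
		switch (call->interruptibility) {
		case RXRPC_INTERRUPTIBLE:
		case RXRPC_PREINTERRUPTIBLE:
			set_current_state(TASK_INTERRUPTIBLE);
			break;
		default:
			set_current_state(TASK_UNINTERRUPTIBLE);
			break;
		}
		if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
			break;
		if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
		     call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
		    signal_pending(current)) {
			ret = sock_intr_errno(*timeo);
			break;
		}
		*timeo = schedule_timeout(*timeo);
	}
	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);
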
94 static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
97 *_tx_win = call->tx_bottom;
98 return call->tx_prepared - call->tx_bottom < 256;
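
The check above allows at most 255 packets between call->tx_bottom and call->tx_prepared. rxrpc_seq_t is an unsigned 32-bit type, so the subtraction stays correct across sequence-number wrap; the standalone snippet below (not from the source) illustrates the arithmetic.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t tx_bottom   = 0xfffffff0;	/* bottom of the Tx queue, about to wrap */
	uint32_t tx_prepared = 0x0000000f;	/* newest prepared packet, after the wrap */

	/* 0x0000000f - 0xfffffff0 == 0x1f in 32-bit unsigned arithmetic,
	 * so the window check still sees 31 packets outstanding. */
	assert(tx_prepared - tx_bottom == 0x1f);
	assert(tx_prepared - tx_bottom < 256);
	return 0;
}
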
105 struct rxrpc_call *call,
110 if (rxrpc_check_tx_space(call, NULL))
113 if (rxrpc_call_is_complete(call))
114 return call->error;
119 trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
129 struct rxrpc_call *call)
134 rtt = READ_ONCE(call->peer->srtt_us) >> 3;
140 tx_start = smp_load_acquire(&call->acks_hard_ack);
145 if (rxrpc_check_tx_space(call, &tx_win))
148 if (rxrpc_call_is_complete(call))
149 return call->error;
160 trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
169 struct rxrpc_call *call,
174 if (rxrpc_check_tx_space(call, NULL))
177 if (rxrpc_call_is_complete(call))
178 return call->error;
180 trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
190 struct rxrpc_call *call,
198 call->tx_bottom, call->acks_hard_ack, call->tx_top, call->tx_winsize);
200 add_wait_queue(&call->waitq, &myself);
202 switch (call->interruptibility) {
205 ret = rxrpc_wait_for_tx_window_waitall(rx, call);
207 ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
212 ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo);
216 remove_wait_queue(&call->waitq, &myself);
223 * Notify the owner of the call that the transmit phase is ended and the last packet has been queued.
226 static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
230 notify_end_tx(&rx->sk, call, call->user_call_ID);
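
As the invocation above shows, the notification receives the owning socket, the call and the user call ID; a kernel service supplies it as an rxrpc_notify_end_tx_t callback. A minimal sketch of such a callback follows; the name and body are illustrative, not taken from any in-tree service.

static void example_notify_end_tx(struct sock *sk, struct rxrpc_call *call,
				  unsigned long user_call_ID)
{
	/* Everything the service queued for this call has now been handed
	 * to the transmitter; switch the service's state machine over to
	 * awaiting the reply (or, on a server, to call completion). */
}
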
238 static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
245 rxrpc_inc_stat(call->rxnet, stat_tx_data);
247 ASSERTCMP(txb->seq, ==, call->tx_prepared + 1);
255 trace_rxrpc_txqueue(call, rxrpc_txqueue_queue_last);
257 trace_rxrpc_txqueue(call, rxrpc_txqueue_queue);
259 /* Add the packet to the call's output buffer */
260 spin_lock(&call->tx_lock);
261 poke = list_empty(&call->tx_sendmsg);
262 list_add_tail(&txb->call_link, &call->tx_sendmsg);
263 call->tx_prepared = seq;
265 rxrpc_notify_end_tx(rx, call, notify_end_tx);
266 spin_unlock(&call->tx_lock);
269 rxrpc_poke_call(call, rxrpc_call_poke_start);
275 * - The caller holds the call user access mutex, but not the socket lock.
278 struct rxrpc_call *call,
292 ret = rxrpc_wait_to_be_connected(call, &timeo);
296 if (call->conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
297 ret = rxrpc_init_client_conn_security(call->conn);
309 state = rxrpc_call_state(call);
317 /* Request phase complete for this client call */
318 trace_rxrpc_abort(call->debug_id, rxrpc_sendmsg_late_send,
319 call->cid, call->call_id, call->rx_consumed,
325 if (call->tx_total_len != -1) {
326 if (len - copied > call->tx_total_len)
328 if (!more && len - copied != call->tx_total_len)
332 txb = call->tx_pending;
333 call->tx_pending = NULL;
343 if (!rxrpc_check_tx_space(call, NULL))
351 txb = call->conn->security->alloc_txbuf(call, remain, sk->sk_allocation);
373 if (call->tx_total_len != -1)
374 call->tx_total_len -= copy;
377 /* check for the far side aborting the call or a network error occurring */
379 if (rxrpc_call_is_complete(call))
387 else if (call->tx_top - call->acks_hard_ack <
388 call->tx_winsize)
391 ret = call->security->secure_packet(call, txb);
397 rxrpc_queue_packet(rx, call, txb, notify_end_tx);
404 if (rxrpc_call_is_complete(call) &&
405 call->error < 0)
406 ret = call->error;
408 call->tx_pending = txb;
414 _leave(" = %d", call->error);
415 return call->error;
430 mutex_unlock(&call->user_mutex);
432 ret = rxrpc_wait_for_tx_window(rx, call, &timeo,
436 if (call->interruptibility == RXRPC_INTERRUPTIBLE) {
437 if (mutex_lock_interruptible(&call->user_mutex) < 0) {
442 mutex_lock(&call->user_mutex);
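
These lines are the flow-control path of rxrpc_send_data(): when no Tx window space is available, the call's user mutex is dropped before sleeping and retaken afterwards, with the caller told through a dropped-lock flag that the lock was released. The following is a hedged reconstruction of the elided code around the lines shown, not the verbatim source.

	ret = -EAGAIN;
	if (msg->msg_flags & MSG_DONTWAIT)
		goto maybe_error;
	mutex_unlock(&call->user_mutex);
	*_dropped_lock = true;
	ret = rxrpc_wait_for_tx_window(rx, call, &timeo,
				       msg->msg_flags & MSG_WAITALL);
	if (ret < 0)
		goto maybe_error;
	if (call->interruptibility == RXRPC_INTERRUPTIBLE) {
		if (mutex_lock_interruptible(&call->user_mutex) < 0) {
			ret = sock_intr_errno(timeo);
			goto maybe_error;
		}
	} else {
		mutex_lock(&call->user_mutex);
	}
	*_dropped_lock = false;
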
476 p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
480 p->call.user_call_ID = *(unsigned long *)
518 if (p->call.tx_total_len != -1 || len != sizeof(__s64))
520 p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
521 if (p->call.tx_total_len < 0)
528 memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
529 p->call.nr_timeouts = len / 4;
530 if (p->call.timeouts.hard > INT_MAX / HZ)
532 if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
534 if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
545 if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
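
For reference, the control messages parsed above are supplied by userspace through sendmsg() on an AF_RXRPC socket. Below is a minimal, hedged sketch of building the two most common ones, RXRPC_USER_CALL_ID and RXRPC_TX_LENGTH; the helper name is illustrative, and the target-address setup, connect/bind and error handling are omitted.

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/rxrpc.h>

static ssize_t send_whole_request(int fd, unsigned long call_id,
				  const void *data, size_t len)
{
	struct iovec iov = { .iov_base = (void *)data, .iov_len = len };
	union {		/* keeps the control buffer suitably aligned */
		char buf[CMSG_SPACE(sizeof(unsigned long)) +
			 CMSG_SPACE(sizeof(__s64))];
		struct cmsghdr align;
	} control;
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control.buf,
		.msg_controllen	= sizeof(control.buf),
	};
	struct cmsghdr *cmsg;
	__s64 tx_total_len = len;

	memset(control.buf, 0, sizeof(control.buf));

	/* Tag the data with the user call ID that names the call. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type	 = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(call_id));
	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));

	/* Optionally declare the total request length up front. */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type	 = RXRPC_TX_LENGTH;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(tx_total_len));
	memcpy(CMSG_DATA(cmsg), &tx_total_len, sizeof(tx_total_len));

	return sendmsg(fd, &msg, 0);	/* no MSG_MORE: this ends the request */
}
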
552 * Create a new client call for sendmsg().
554 * - If it returns a call, the call's lock will need releasing by the caller.
560 __acquires(&call->user_mutex)
564 struct rxrpc_call *call;
594 call = rxrpc_new_client_call(rx, &cp, &p->call, GFP_KERNEL,
599 _leave(" = %p\n", call);
600 return call;
604 * send a message forming part of a client call through an RxRPC socket
611 struct rxrpc_call *call;
616 .call.tx_total_len = -1,
617 .call.user_call_ID = 0,
618 .call.nr_timeouts = 0,
619 .call.interruptibility = RXRPC_INTERRUPTIBLE,
636 ret = rxrpc_user_charge_accept(rx, p.call.user_call_ID);
640 call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
641 if (!call) {
645 call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
647 if (IS_ERR(call))
648 return PTR_ERR(call);
649 /* ... and we have the call lock. */
650 p.call.nr_timeouts = 0;
652 if (rxrpc_call_is_complete(call))
655 switch (rxrpc_call_state(call)) {
663 rxrpc_put_call(call, rxrpc_call_put_sendmsg);
670 ret = mutex_lock_interruptible(&call->user_mutex);
677 if (p.call.tx_total_len != -1) {
679 if (call->tx_total_len != -1 ||
680 call->tx_pending ||
681 call->tx_top != 0)
683 call->tx_total_len = p.call.tx_total_len;
687 switch (p.call.nr_timeouts) {
689 WRITE_ONCE(call->next_rx_timo, p.call.timeouts.normal);
692 WRITE_ONCE(call->next_req_timo, p.call.timeouts.idle);
695 if (p.call.timeouts.hard > 0) {
696 ktime_t delay = ms_to_ktime(p.call.timeouts.hard * MSEC_PER_SEC);
698 WRITE_ONCE(call->expect_term_by,
699 ktime_add(ktime_get_real(), delay));
701 trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);
702 rxrpc_poke_call(call, rxrpc_call_poke_set_timeout);
708 if (rxrpc_call_is_complete(call)) {
709 /* it's too late for this call */
712 rxrpc_propose_abort(call, p.abort_code, -ECONNABORTED,
718 ret = rxrpc_send_data(rx, call, msg, len, NULL, &dropped_lock);
723 mutex_unlock(&call->user_mutex);
725 rxrpc_put_call(call, rxrpc_call_put_sendmsg);
735 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
736 * @sock: The socket the call is on
737 * @call: The call to send data through
742 * Allow a kernel service to send data on a call. The call must be in a state appropriate to sending data.
747 int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
754 _enter("{%d},", call->debug_id);
759 mutex_lock(&call->user_mutex);
761 ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
764 ret = call->error;
767 mutex_unlock(&call->user_mutex);
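
A hedged sketch of how a kernel service (in the style of fs/afs) might drive rxrpc_kernel_send_data() with a kvec-backed message; the helper name and buffer handling are illustrative.

#include <linux/uio.h>
#include <net/af_rxrpc.h>

static int example_send_blob(struct socket *rxsock, struct rxrpc_call *call,
			     void *buf, size_t len, bool more,
			     rxrpc_notify_end_tx_t notify_end_tx)
{
	struct kvec kv = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_flags = more ? MSG_MORE : 0,	/* MSG_MORE if more data follows */
	};

	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, len);

	/* Takes the call's user mutex internally; a negative return means
	 * the send failed (e.g. the call was aborted or hit a net error). */
	return rxrpc_kernel_send_data(rxsock, call, &msg, len, notify_end_tx);
}
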
774 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
775 * @sock: The socket the call is on
776 * @call: The call to be aborted
781 * Allow a kernel service to abort a call if it's still in an abortable state.
782 * Returns true if the call was aborted, false if it was already complete.
784 bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
789 _enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);
791 mutex_lock(&call->user_mutex);
792 aborted = rxrpc_propose_abort(call, abort_code, error, why);
793 mutex_unlock(&call->user_mutex);
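
A short, hedged usage sketch: RX_USER_ABORT and -ECANCELED are example values, and the tracing reason is passed in by the caller since the exact enum rxrpc_abort_reason entry depends on what the service wants recorded.

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/rxrpc.h>
#include <net/af_rxrpc.h>

static void example_abort(struct socket *rxsock, struct rxrpc_call *call,
			  enum rxrpc_abort_reason why)
{
	/* RX_USER_ABORT is the classic "aborted by the user" Rx abort code;
	 * -ECANCELED is the local error reported to anyone waiting. */
	if (!rxrpc_kernel_abort_call(rxsock, call, RX_USER_ABORT, -ECANCELED, why))
		pr_debug("call had already completed\n");
}
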
799 * rxrpc_kernel_set_tx_length - Set the total Tx length on a call
800 * @sock: The socket the call is on
801 * @call: The call to be informed
802 * @tx_total_len: The amount of data to be transmitted for this call
804 * Allow a kernel service to set the total transmit length on a call. This is
808 * primarily for setting the reply length, since the request length can be set when beginning the call.
810 void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
813 WARN_ON(call->tx_total_len != -1);
814 call->tx_total_len = tx_total_len;
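
A one-line, hedged example of the intended use: a server-side service that knows its reply size in advance sets it before queuing any reply data (rxsock, call and reply_size are illustrative variables).

/* Tell rxrpc the exact amount of reply data to expect so that packet
 * sizes can be chosen up front; may only be done once per call, before
 * any of that data is queued. */
rxrpc_kernel_set_tx_length(rxsock, call, reply_size);
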