Lines matching refs:call in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/net/rxrpc/

38  * - the caller must hold a lock on call->lock
44 int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
48 struct rxrpc_sock *rx = call->socket;
57 ASSERTCMP(sp->call, ==, call);
59 /* if we've already posted the terminal message for a call, then we
61 if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
63 ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
65 sp->call = NULL;
66 rxrpc_put_call(call);
88 if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags) &&
89 !test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
90 call->socket->sk.sk_state != RXRPC_CLOSE) {
98 set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags);
103 rx->interceptor(sk, call->user_call_ID, skb);
130 sp->call = NULL;
131 rxrpc_put_call(call);
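
The matches above (lines 38-131) come from rxrpc_queue_rcv_skb(): with call->lock held, a packet is queued towards the socket unless the terminal message for the call has already been posted, in which case the packet's back-pointer to the call is cleared and the reference it held is put. Below is a minimal userspace sketch of that guard, assuming a much-simplified message and refcount model; queue_rcv_msg, FLAG_* and the fields of struct call here are illustrative names, not the kernel API.

    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_TERMINAL_MSG (1u << 0)   /* mirrors RXRPC_CALL_TERMINAL_MSG */
    #define FLAG_RELEASED     (1u << 1)   /* mirrors RXRPC_CALL_RELEASED */

    struct call {
        unsigned flags;   /* call state flags */
        int usage;        /* reference count (illustrative) */
        int queued;       /* messages sitting on the receive queue */
    };

    /* Queue one message for the call, or drop it if the terminal message
     * (final reply or abort) has already been delivered.  The real function
     * additionally checks the socket state and an interceptor hook. */
    static bool queue_rcv_msg(struct call *call, bool terminal)
    {
        if (call->flags & FLAG_TERMINAL_MSG) {
            call->usage--;          /* drop the ref the message held */
            return false;
        }
        if (!(call->flags & FLAG_RELEASED)) {
            call->queued++;
            if (terminal)
                call->flags |= FLAG_TERMINAL_MSG;
            return true;
        }
        call->usage--;
        return false;
    }

    int main(void)
    {
        struct call c = { .usage = 2 };
        printf("%d\n", queue_rcv_msg(&c, true));   /* 1: queued, marks terminal */
        printf("%d\n", queue_rcv_msg(&c, false));  /* 0: dropped after terminal */
        return 0;
    }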
143 static int rxrpc_fast_process_data(struct rxrpc_call *call,
150 _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);
153 ASSERTCMP(sp->call, ==, NULL);
155 spin_lock(&call->lock);
157 if (call->state > RXRPC_CALL_COMPLETE)
160 ASSERTCMP(call->rx_data_expect, >=, call->rx_data_post);
161 ASSERTCMP(call->rx_data_post, >=, call->rx_data_recv);
162 ASSERTCMP(call->rx_data_recv, >=, call->rx_data_eaten);
164 if (seq < call->rx_data_post) {
165 _debug("dup #%u [-%u]", seq, call->rx_data_post);
172 ackbit = seq - (call->rx_data_eaten + 1);
174 if (__test_and_set_bit(ackbit, call->ackr_window)) {
176 seq, call->rx_data_eaten, call->rx_data_post);
181 if (seq >= call->ackr_win_top) {
182 _debug("exceed #%u [%u]", seq, call->ackr_win_top);
183 __clear_bit(ackbit, call->ackr_window);
188 if (seq == call->rx_data_expect) {
189 clear_bit(RXRPC_CALL_EXPECT_OOS, &call->flags);
190 call->rx_data_expect++;
191 } else if (seq > call->rx_data_expect) {
192 _debug("oos #%u [%u]", seq, call->rx_data_expect);
193 call->rx_data_expect = seq + 1;
194 if (test_and_set_bit(RXRPC_CALL_EXPECT_OOS, &call->flags)) {
201 if (seq != call->rx_data_post) {
202 _debug("ahead #%u [%u]", seq, call->rx_data_post);
206 if (test_bit(RXRPC_CALL_RCVD_LAST, &call->flags))
211 if (call->conn->security)
214 sp->call = call;
215 rxrpc_get_call(call);
218 ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
221 __clear_bit(ackbit, call->ackr_window);
231 ASSERTCMP(call->rx_data_post, ==, seq);
232 call->rx_data_post++;
235 set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);
239 if (call->rx_data_post == call->rx_first_oos) {
241 read_lock(&call->state_lock);
242 if (call->state < RXRPC_CALL_COMPLETE &&
243 !test_and_set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events))
244 rxrpc_queue_call(call);
245 read_unlock(&call->state_lock);
248 spin_unlock(&call->lock);
249 atomic_inc(&call->ackr_not_idle);
250 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false);
257 spin_unlock(&call->lock);
263 __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
265 spin_unlock(&call->lock);
271 __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
274 spin_unlock(&call->lock);
275 skb_queue_tail(&call->rx_queue, skb);
276 atomic_inc(&call->ackr_not_idle);
277 read_lock(&call->state_lock);
278 if (call->state < RXRPC_CALL_DEAD)
279 rxrpc_queue_call(call);
280 read_unlock(&call->state_lock);
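
Lines 143-280 are the per-DATA-packet window bookkeeping in rxrpc_fast_process_data(): packets below rx_data_post are duplicates, a bitmap indexed by seq - (rx_data_eaten + 1) catches duplicates inside the window, packets at or above ackr_win_top are rejected, and rx_data_expect/rx_data_post advance as packets arrive in or out of sequence. A simplified userspace model of that arithmetic follows; the window size, return codes and field names are illustrative, not the kernel's.

    #include <stdint.h>
    #include <stdio.h>

    #define WINDOW 64                 /* assumed window size for the sketch */

    struct rxwin {
        uint32_t data_post;           /* next seq to deliver to the app */
        uint32_t data_eaten;          /* last seq consumed by the app */
        uint32_t data_expect;         /* next seq we expect off the wire */
        uint32_t win_top;             /* first seq outside the window */
        uint64_t ackr_window;         /* one bit per in-window packet seen */
    };

    enum verdict { DELIVER, QUEUE_OOS, DUP, EXCEEDS_WINDOW };

    static enum verdict classify(struct rxwin *w, uint32_t seq)
    {
        uint32_t ackbit;

        if (seq < w->data_post)
            return DUP;                         /* already delivered */

        ackbit = seq - (w->data_eaten + 1);     /* bit index inside the window */
        if (seq >= w->win_top)
            return EXCEEDS_WINDOW;              /* beyond the advertised window */
        if (w->ackr_window & (1ull << ackbit))
            return DUP;                         /* seen this one already */
        w->ackr_window |= 1ull << ackbit;

        if (seq == w->data_expect)
            w->data_expect++;                   /* in sequence */
        else if (seq > w->data_expect)
            w->data_expect = seq + 1;           /* gap: out-of-sequence arrival */

        if (seq != w->data_post)
            return QUEUE_OOS;                   /* hold until the gap is filled */

        w->data_post++;                         /* deliverable right now */
        return DELIVER;
    }

    int main(void)
    {
        struct rxwin w = { .data_post = 1, .data_eaten = 0, .data_expect = 1,
                           .win_top = 1 + WINDOW };
        printf("%d %d %d\n", classify(&w, 1), classify(&w, 3), classify(&w, 2));
        return 0;
    }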
289 static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial)
291 write_lock_bh(&call->state_lock);
293 switch (call->state) {
295 call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
296 call->acks_latest = serial;
298 _debug("implicit ACKALL %%%u", call->acks_latest);
299 set_bit(RXRPC_CALL_RCVD_ACKALL, &call->events);
300 write_unlock_bh(&call->state_lock);
302 if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
303 clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
304 clear_bit(RXRPC_CALL_RESEND, &call->events);
305 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
310 write_unlock_bh(&call->state_lock);
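
Lines 289-310 are rxrpc_assume_implicit_ackall(): when reply DATA arrives while the client is still in CLIENT_AWAIT_REPLY, it is treated as an implicit ACKALL of the request, so the call moves to CLIENT_RECV_REPLY, records the serial and cancels the resend timer. A compact sketch of that transition, with a trivial timer stand-in (cancel_resend_timer() here is illustrative, not the kernel's try_to_del_timer_sync() path):

    #include <stdbool.h>
    #include <stdio.h>

    enum call_state { CLIENT_SEND_REQUEST, CLIENT_AWAIT_REPLY, CLIENT_RECV_REPLY };

    struct call {
        enum call_state state;
        unsigned int acks_latest;   /* serial of the newest implied ack */
        bool resend_timer_armed;
    };

    /* Illustrative stand-in for deleting the resend timer and clearing the
     * related resend events/flags. */
    static void cancel_resend_timer(struct call *call)
    {
        call->resend_timer_armed = false;
    }

    /* Reply data has arrived: everything we transmitted is implicitly acked. */
    static void assume_implicit_ackall(struct call *call, unsigned int serial)
    {
        if (call->state != CLIENT_AWAIT_REPLY)
            return;                      /* only meaningful in this state */

        call->state = CLIENT_RECV_REPLY;
        call->acks_latest = serial;      /* remember which packet implied it */
        cancel_resend_timer(call);       /* no need to resend the request */
    }

    int main(void)
    {
        struct call c = { .state = CLIENT_AWAIT_REPLY, .resend_timer_armed = true };
        assume_implicit_ackall(&c, 42);
        printf("state=%d latest=%u timer=%d\n", c.state, c.acks_latest,
               c.resend_timer_armed);
        return 0;
    }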
316 * post an incoming packet to the nominated call to deal with
319 void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
325 _enter("%p,%p", call, skb);
333 hi_serial = atomic_read(&call->conn->hi_serial);
335 hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
342 rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial,
357 write_lock_bh(&call->state_lock);
358 if (call->state < RXRPC_CALL_COMPLETE) {
359 call->state = RXRPC_CALL_REMOTELY_ABORTED;
360 call->abort_code = abort_code;
361 set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
362 rxrpc_queue_call(call);
369 if (call->conn->out_clientflag)
372 write_lock_bh(&call->state_lock);
373 switch (call->state) {
375 call->state = RXRPC_CALL_SERVER_BUSY;
376 set_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
377 rxrpc_queue_call(call);
396 call->ackr_prev_seq = sp->hdr.seq;
400 if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
401 rxrpc_assume_implicit_ackall(call, serial);
403 switch (rxrpc_fast_process_data(call, skb, seq)) {
418 read_lock_bh(&call->state_lock);
419 if (call->state < RXRPC_CALL_DEAD) {
420 skb_queue_tail(&call->rx_queue, skb);
421 rxrpc_queue_call(call);
424 read_unlock_bh(&call->state_lock);
430 write_lock_bh(&call->state_lock);
432 if (call->state <= RXRPC_CALL_COMPLETE) {
433 call->state = RXRPC_CALL_LOCALLY_ABORTED;
434 call->abort_code = RX_PROTOCOL_ERROR;
435 set_bit(RXRPC_CALL_ABORT, &call->events);
436 rxrpc_queue_call(call);
439 write_unlock_bh(&call->state_lock);
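
Lines 316-439 are rxrpc_fast_process_packet(), which dispatches by packet type (ACK-requested, ABORT, BUSY, DATA) and, for every packet, raises the connection's record of the highest serial seen using a compare-and-exchange loop (lines 333-335). A standalone C11 sketch of that lock-free maximum update; the function and parameter names are illustrative.

    #include <stdatomic.h>
    #include <stdio.h>

    /* Raise *hi_serial to "serial" if it is larger, without taking a lock.
     * Mirrors the atomic_read()/atomic_cmpxchg() loop in the matched lines. */
    static void note_hi_serial(atomic_uint *hi_serial, unsigned int serial)
    {
        unsigned int seen = atomic_load(hi_serial);

        while (serial > seen) {
            /* On success, *hi_serial becomes "serial"; on failure, "seen" is
             * reloaded with the current value and we retry (or give up if it
             * is already >= serial). */
            if (atomic_compare_exchange_weak(hi_serial, &seen, serial))
                break;
        }
    }

    int main(void)
    {
        atomic_uint hi = 5;
        note_hi_serial(&hi, 9);
        note_hi_serial(&hi, 7);     /* no effect: 9 is already higher */
        printf("%u\n", atomic_load(&hi));
        return 0;
    }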
449 static void rxrpc_process_jumbo_packet(struct rxrpc_call *call,
490 rxrpc_fast_process_packet(call, part);
495 rxrpc_fast_process_packet(call, jumbo);
503 write_lock_bh(&call->state_lock);
504 if (call->state <= RXRPC_CALL_COMPLETE) {
505 call->state = RXRPC_CALL_LOCALLY_ABORTED;
506 call->abort_code = RX_PROTOCOL_ERROR;
507 set_bit(RXRPC_CALL_ABORT, &call->events);
508 rxrpc_queue_call(call);
510 write_unlock_bh(&call->state_lock);
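
Lines 449-510 are rxrpc_process_jumbo_packet(): a jumbo DATA packet is split into individual sub-packets, each handed to rxrpc_fast_process_packet(), and if splitting fails the call is locally aborted with RX_PROTOCOL_ERROR. Below is a simplified sketch of the splitting loop only, assuming fixed-size segments and a caller-supplied per-packet handler; SEG_LEN and the handler type are illustrative and do not reflect the real on-wire jumbo format.

    #include <stddef.h>
    #include <stdio.h>

    #define SEG_LEN 1412   /* assumed sub-packet payload size for the sketch */

    typedef void (*packet_fn)(const unsigned char *data, size_t len);

    /* Split "len" bytes of jumbo payload into SEG_LEN-sized sub-packets and
     * hand each to the normal per-packet handler; the final (short) segment
     * carries whatever is left.  Returns -1 for an empty payload, which the
     * real code treats as a protocol error and aborts the call. */
    static int process_jumbo(const unsigned char *data, size_t len, packet_fn fn)
    {
        if (len == 0)
            return -1;

        while (len > SEG_LEN) {
            fn(data, SEG_LEN);
            data += SEG_LEN;
            len -= SEG_LEN;
        }
        fn(data, len);     /* last sub-packet */
        return 0;
    }

    static void count_packet(const unsigned char *data, size_t len)
    {
        (void)data;
        printf("sub-packet of %zu bytes\n", len);
    }

    int main(void)
    {
        static unsigned char payload[3000];
        return process_jumbo(payload, sizeof(payload), count_packet) ? 1 : 0;
    }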
515 * post an incoming packet to the appropriate call/socket to deal with
522 struct rxrpc_call *call;
533 call = conn->channels[ntohl(sp->hdr.cid) & RXRPC_CHANNELMASK];
534 if (!call || call->call_id != sp->hdr.callNumber)
537 _debug("extant call [%d]", call->state);
538 ASSERTCMP(call->conn, ==, conn);
540 read_lock(&call->state_lock);
541 switch (call->state) {
543 if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
544 rxrpc_queue_call(call);
553 read_unlock(&call->state_lock);
554 rxrpc_get_call(call);
559 rxrpc_process_jumbo_packet(call, skb);
561 rxrpc_fast_process_packet(call, skb);
563 rxrpc_put_call(call);
569 _debug("call not extant");
574 call = rb_entry(p, struct rxrpc_call, conn_node);
576 if (call_id < call->call_id)
578 else if (call_id > call->call_id)
585 /* it's either a really old call that we no longer remember or it's a
586 * new incoming call */
591 _debug("incoming call");
597 _debug("dead call");
602 /* resend last packet of a completed call
607 _debug("completed call");
609 if (atomic_read(&call->usage) == 0)
613 read_lock(&call->state_lock);
614 ASSERTIFCMP(call->state != RXRPC_CALL_CLIENT_FINAL_ACK,
615 call->state, >=, RXRPC_CALL_COMPLETE);
617 if (call->state == RXRPC_CALL_LOCALLY_ABORTED ||
618 call->state == RXRPC_CALL_REMOTELY_ABORTED ||
619 call->state == RXRPC_CALL_DEAD) {
620 read_unlock(&call->state_lock);
624 if (call->conn->in_clientflag) {
625 read_unlock(&call->state_lock);
626 goto dead_call; /* complete server call */
630 rxrpc_get_call(call);
631 set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
632 rxrpc_queue_call(call);
635 read_unlock(&call->state_lock);
699 /* we'll probably need to checksum it (didn't call sock_recvmsg) */
726 _net("Rx RxRPC %s ep=%x call=%x:%x",
766 _debug("can't route call");
785 _leave(" [no call]");
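
The remaining matches (lines 515-785) are the connection-level dispatcher: the call is looked up in the connection's channel array by CID, checked against the packet's callNumber, and, failing that, searched for in the connection's tree of calls ordered by call_id (lines 574-578 show the comparison used for the walk). A minimal sketch of that two-step lookup, using an ordinary binary search tree in place of the kernel rb-tree; struct conn, NCHANNELS and find_call() are illustrative names.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NCHANNELS 4           /* rxrpc multiplexes four calls per connection */

    struct call {
        uint32_t call_id;
        struct call *left, *right;   /* stand-in for the kernel rb-tree node */
    };

    struct conn {
        struct call *channels[NCHANNELS];   /* currently active call per channel */
        struct call *calls;                 /* older calls, ordered by call_id */
    };

    /* Find the call a packet belongs to: first the active channel slot, then
     * the per-connection tree of calls keyed by call_id. */
    static struct call *find_call(struct conn *conn, uint32_t cid, uint32_t call_id)
    {
        struct call *call = conn->channels[cid & (NCHANNELS - 1)];

        if (call && call->call_id == call_id)
            return call;                       /* extant, active call */

        for (call = conn->calls; call; )       /* tree walk, as in lines 574-578 */
            if (call_id < call->call_id)
                call = call->left;
            else if (call_id > call->call_id)
                call = call->right;
            else
                return call;                   /* old call we still remember */

        return NULL;   /* really old call we forgot, or a brand new incoming one */
    }

    int main(void)
    {
        struct call old = { .call_id = 3 };
        struct call cur = { .call_id = 7 };
        struct conn conn = { .channels = { [3] = &cur }, .calls = &old };

        printf("%p %p\n", (void *)find_call(&conn, 3, 7),
               (void *)find_call(&conn, 0, 3));
        return 0;
    }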