1/* 2 * linux/net/sunrpc/clnt.c 3 * 4 * This file contains the high-level RPC interface. 5 * It is modeled as a finite state machine to support both synchronous 6 * and asynchronous requests. 7 * 8 * - RPC header generation and argument serialization. 9 * - Credential refresh. 10 * - TCP connect handling. 11 * - Retry of operation when it is suspected the operation failed because 12 * of uid squashing on the server, or when the credentials were stale 13 * and need to be refreshed, or when a packet was damaged in transit. 14 * This may be have to be moved to the VFS layer. 15 * 16 * NB: BSD uses a more intelligent approach to guessing when a request 17 * or reply has been lost by keeping the RTO estimate for each procedure. 18 * We currently make do with a constant timeout value. 19 * 20 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com> 21 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de> 22 */ 23 24#include <asm/system.h> 25 26#include <linux/module.h> 27#include <linux/types.h> 28#include <linux/mm.h> 29#include <linux/slab.h> 30#include <linux/smp_lock.h> 31#include <linux/utsname.h> 32#include <linux/workqueue.h> 33 34#include <linux/sunrpc/clnt.h> 35#include <linux/sunrpc/rpc_pipe_fs.h> 36#include <linux/sunrpc/metrics.h> 37 38 39#ifdef RPC_DEBUG 40# define RPCDBG_FACILITY RPCDBG_CALL 41#endif 42 43#define dprint_status(t) \ 44 dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ 45 __FUNCTION__, t->tk_status) 46 47static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); 48 49 50static void call_start(struct rpc_task *task); 51static void call_reserve(struct rpc_task *task); 52static void call_reserveresult(struct rpc_task *task); 53static void call_allocate(struct rpc_task *task); 54static void call_encode(struct rpc_task *task); 55static void call_decode(struct rpc_task *task); 56static void call_bind(struct rpc_task *task); 57static void call_bind_status(struct rpc_task *task); 58static void call_transmit(struct rpc_task *task); 59static void 
call_status(struct rpc_task *task); 60static void call_transmit_status(struct rpc_task *task); 61static void call_refresh(struct rpc_task *task); 62static void call_refreshresult(struct rpc_task *task); 63static void call_timeout(struct rpc_task *task); 64static void call_connect(struct rpc_task *task); 65static void call_connect_status(struct rpc_task *task); 66static __be32 * call_header(struct rpc_task *task); 67static __be32 * call_verify(struct rpc_task *task); 68 69 70static int 71rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) 72{ 73 static uint32_t clntid; 74 int error; 75 76 clnt->cl_vfsmnt = ERR_PTR(-ENOENT); 77 clnt->cl_dentry = ERR_PTR(-ENOENT); 78 if (dir_name == NULL) 79 return 0; 80 81 clnt->cl_vfsmnt = rpc_get_mount(); 82 if (IS_ERR(clnt->cl_vfsmnt)) 83 return PTR_ERR(clnt->cl_vfsmnt); 84 85 for (;;) { 86 snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname), 87 "%s/clnt%x", dir_name, 88 (unsigned int)clntid++); 89 clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0'; 90 clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt); 91 if (!IS_ERR(clnt->cl_dentry)) 92 return 0; 93 error = PTR_ERR(clnt->cl_dentry); 94 if (error != -EEXIST) { 95 printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n", 96 clnt->cl_pathname, error); 97 rpc_put_mount(); 98 return error; 99 } 100 } 101} 102 103static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, struct rpc_program *program, u32 vers, rpc_authflavor_t flavor) 104{ 105 struct rpc_version *version; 106 struct rpc_clnt *clnt = NULL; 107 struct rpc_auth *auth; 108 int err; 109 int len; 110 111 dprintk("RPC: creating %s client for %s (xprt %p)\n", 112 program->name, servname, xprt); 113 114 err = -EINVAL; 115 if (!xprt) 116 goto out_no_xprt; 117 if (vers >= program->nrvers || !(version = program->version[vers])) 118 goto out_err; 119 120 err = -ENOMEM; 121 clnt = kzalloc(sizeof(*clnt), GFP_KERNEL); 122 if (!clnt) 123 goto out_err; 124 atomic_set(&clnt->cl_users, 0); 125 
atomic_set(&clnt->cl_count, 1); 126 clnt->cl_parent = clnt; 127 128 clnt->cl_server = clnt->cl_inline_name; 129 len = strlen(servname) + 1; 130 if (len > sizeof(clnt->cl_inline_name)) { 131 char *buf = kmalloc(len, GFP_KERNEL); 132 if (buf != 0) 133 clnt->cl_server = buf; 134 else 135 len = sizeof(clnt->cl_inline_name); 136 } 137 strlcpy(clnt->cl_server, servname, len); 138 139 clnt->cl_xprt = xprt; 140 clnt->cl_procinfo = version->procs; 141 clnt->cl_maxproc = version->nrprocs; 142 clnt->cl_protname = program->name; 143 clnt->cl_prog = program->number; 144 clnt->cl_vers = version->number; 145 clnt->cl_stats = program->stats; 146 clnt->cl_metrics = rpc_alloc_iostats(clnt); 147 err = -ENOMEM; 148 if (clnt->cl_metrics == NULL) 149 goto out_no_stats; 150 clnt->cl_program = program; 151 152 if (!xprt_bound(clnt->cl_xprt)) 153 clnt->cl_autobind = 1; 154 155 clnt->cl_rtt = &clnt->cl_rtt_default; 156 rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval); 157 158 err = rpc_setup_pipedir(clnt, program->pipe_dir_name); 159 if (err < 0) 160 goto out_no_path; 161 162 auth = rpcauth_create(flavor, clnt); 163 if (IS_ERR(auth)) { 164 printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n", 165 flavor); 166 err = PTR_ERR(auth); 167 goto out_no_auth; 168 } 169 170 /* save the nodename */ 171 clnt->cl_nodelen = strlen(utsname()->nodename); 172 if (clnt->cl_nodelen > UNX_MAXNODENAME) 173 clnt->cl_nodelen = UNX_MAXNODENAME; 174 memcpy(clnt->cl_nodename, utsname()->nodename, clnt->cl_nodelen); 175 return clnt; 176 177out_no_auth: 178 if (!IS_ERR(clnt->cl_dentry)) { 179 rpc_rmdir(clnt->cl_dentry); 180 rpc_put_mount(); 181 } 182out_no_path: 183 rpc_free_iostats(clnt->cl_metrics); 184out_no_stats: 185 if (clnt->cl_server != clnt->cl_inline_name) 186 kfree(clnt->cl_server); 187 kfree(clnt); 188out_err: 189 xprt_put(xprt); 190out_no_xprt: 191 return ERR_PTR(err); 192} 193 194/* 195 * rpc_create - create an RPC client and transport with one call 196 * @args: rpc_clnt create 
argument structure 197 * 198 * Creates and initializes an RPC transport and an RPC client. 199 * 200 * It can ping the server in order to determine if it is up, and to see if 201 * it supports this program and version. RPC_CLNT_CREATE_NOPING disables 202 * this behavior so asynchronous tasks can also use rpc_create. 203 */ 204struct rpc_clnt *rpc_create(struct rpc_create_args *args) 205{ 206 struct rpc_xprt *xprt; 207 struct rpc_clnt *clnt; 208 209 xprt = xprt_create_transport(args->protocol, args->address, 210 args->addrsize, args->timeout); 211 if (IS_ERR(xprt)) 212 return (struct rpc_clnt *)xprt; 213 214 /* 215 * By default, kernel RPC client connects from a reserved port. 216 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters, 217 * but it is always enabled for rpciod, which handles the connect 218 * operation. 219 */ 220 xprt->resvport = 1; 221 if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT) 222 xprt->resvport = 0; 223 224 dprintk("RPC: creating %s client for %s (xprt %p)\n", 225 args->program->name, args->servername, xprt); 226 227 clnt = rpc_new_client(xprt, args->servername, args->program, 228 args->version, args->authflavor); 229 if (IS_ERR(clnt)) 230 return clnt; 231 232 if (!(args->flags & RPC_CLNT_CREATE_NOPING)) { 233 int err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR); 234 if (err != 0) { 235 rpc_shutdown_client(clnt); 236 return ERR_PTR(err); 237 } 238 } 239 240 clnt->cl_softrtry = 1; 241 if (args->flags & RPC_CLNT_CREATE_HARDRTRY) 242 clnt->cl_softrtry = 0; 243 244 if (args->flags & RPC_CLNT_CREATE_INTR) 245 clnt->cl_intr = 1; 246 if (args->flags & RPC_CLNT_CREATE_AUTOBIND) 247 clnt->cl_autobind = 1; 248 if (args->flags & RPC_CLNT_CREATE_ONESHOT) 249 clnt->cl_oneshot = 1; 250 if (args->flags & RPC_CLNT_CREATE_DISCRTRY) 251 clnt->cl_discrtry = 1; 252 253 return clnt; 254} 255EXPORT_SYMBOL_GPL(rpc_create); 256 257/* 258 * This function clones the RPC client structure. 
It allows us to share the 259 * same transport while varying parameters such as the authentication 260 * flavour. 261 */ 262struct rpc_clnt * 263rpc_clone_client(struct rpc_clnt *clnt) 264{ 265 struct rpc_clnt *new; 266 int err = -ENOMEM; 267 268 new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); 269 if (!new) 270 goto out_no_clnt; 271 atomic_set(&new->cl_count, 1); 272 atomic_set(&new->cl_users, 0); 273 new->cl_metrics = rpc_alloc_iostats(clnt); 274 if (new->cl_metrics == NULL) 275 goto out_no_stats; 276 err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); 277 if (err != 0) 278 goto out_no_path; 279 new->cl_parent = clnt; 280 atomic_inc(&clnt->cl_count); 281 new->cl_xprt = xprt_get(clnt->cl_xprt); 282 /* Turn off autobind on clones */ 283 new->cl_autobind = 0; 284 new->cl_oneshot = 0; 285 new->cl_dead = 0; 286 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); 287 if (new->cl_auth) 288 atomic_inc(&new->cl_auth->au_count); 289 return new; 290out_no_path: 291 rpc_free_iostats(new->cl_metrics); 292out_no_stats: 293 kfree(new); 294out_no_clnt: 295 dprintk("RPC: %s: returned error %d\n", __FUNCTION__, err); 296 return ERR_PTR(err); 297} 298 299/* 300 * Properly shut down an RPC client, terminating all outstanding 301 * requests. Note that we must be certain that cl_oneshot and 302 * cl_dead are cleared, or else the client would be destroyed 303 * when the last task releases it. 
304 */ 305int 306rpc_shutdown_client(struct rpc_clnt *clnt) 307{ 308 dprintk("RPC: shutting down %s client for %s, tasks=%d\n", 309 clnt->cl_protname, clnt->cl_server, 310 atomic_read(&clnt->cl_users)); 311 312 while (atomic_read(&clnt->cl_users) > 0) { 313 /* Don't let rpc_release_client destroy us */ 314 clnt->cl_oneshot = 0; 315 clnt->cl_dead = 0; 316 rpc_killall_tasks(clnt); 317 wait_event_timeout(destroy_wait, 318 !atomic_read(&clnt->cl_users), 1*HZ); 319 } 320 321 if (atomic_read(&clnt->cl_users) < 0) { 322 printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n", 323 clnt, atomic_read(&clnt->cl_users)); 324#ifdef RPC_DEBUG 325 rpc_show_tasks(); 326#endif 327 BUG(); 328 } 329 330 return rpc_destroy_client(clnt); 331} 332 333/* 334 * Delete an RPC client 335 */ 336int 337rpc_destroy_client(struct rpc_clnt *clnt) 338{ 339 if (!atomic_dec_and_test(&clnt->cl_count)) 340 return 1; 341 BUG_ON(atomic_read(&clnt->cl_users) != 0); 342 343 dprintk("RPC: destroying %s client for %s\n", 344 clnt->cl_protname, clnt->cl_server); 345 if (clnt->cl_auth) { 346 rpcauth_destroy(clnt->cl_auth); 347 clnt->cl_auth = NULL; 348 } 349 if (!IS_ERR(clnt->cl_dentry)) { 350 rpc_rmdir(clnt->cl_dentry); 351 rpc_put_mount(); 352 } 353 if (clnt->cl_parent != clnt) { 354 rpc_destroy_client(clnt->cl_parent); 355 goto out_free; 356 } 357 if (clnt->cl_server != clnt->cl_inline_name) 358 kfree(clnt->cl_server); 359out_free: 360 rpc_free_iostats(clnt->cl_metrics); 361 clnt->cl_metrics = NULL; 362 xprt_put(clnt->cl_xprt); 363 kfree(clnt); 364 return 0; 365} 366 367/* 368 * Release an RPC client 369 */ 370void 371rpc_release_client(struct rpc_clnt *clnt) 372{ 373 dprintk("RPC: rpc_release_client(%p, %d)\n", 374 clnt, atomic_read(&clnt->cl_users)); 375 376 if (!atomic_dec_and_test(&clnt->cl_users)) 377 return; 378 wake_up(&destroy_wait); 379 if (clnt->cl_oneshot || clnt->cl_dead) 380 rpc_destroy_client(clnt); 381} 382 383/** 384 * rpc_bind_new_program - bind a new RPC program to an existing 
client 385 * @old - old rpc_client 386 * @program - rpc program to set 387 * @vers - rpc program version 388 * 389 * Clones the rpc client and sets up a new RPC program. This is mainly 390 * of use for enabling different RPC programs to share the same transport. 391 * The Sun NFSv2/v3 ACL protocol can do this. 392 */ 393struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old, 394 struct rpc_program *program, 395 int vers) 396{ 397 struct rpc_clnt *clnt; 398 struct rpc_version *version; 399 int err; 400 401 BUG_ON(vers >= program->nrvers || !program->version[vers]); 402 version = program->version[vers]; 403 clnt = rpc_clone_client(old); 404 if (IS_ERR(clnt)) 405 goto out; 406 clnt->cl_procinfo = version->procs; 407 clnt->cl_maxproc = version->nrprocs; 408 clnt->cl_protname = program->name; 409 clnt->cl_prog = program->number; 410 clnt->cl_vers = version->number; 411 clnt->cl_stats = program->stats; 412 err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR); 413 if (err != 0) { 414 rpc_shutdown_client(clnt); 415 clnt = ERR_PTR(err); 416 } 417out: 418 return clnt; 419} 420 421/* 422 * Default callback for async RPC calls 423 */ 424static void 425rpc_default_callback(struct rpc_task *task, void *data) 426{ 427} 428 429static const struct rpc_call_ops rpc_default_ops = { 430 .rpc_call_done = rpc_default_callback, 431}; 432 433/* 434 * Export the signal mask handling for synchronous code that 435 * sleeps on RPC calls 436 */ 437#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM)) 438 439static void rpc_save_sigmask(sigset_t *oldset, int intr) 440{ 441 unsigned long sigallow = sigmask(SIGKILL); 442 sigset_t sigmask; 443 444 /* Block all signals except those listed in sigallow */ 445 if (intr) 446 sigallow |= RPC_INTR_SIGNALS; 447 siginitsetinv(&sigmask, sigallow); 448 sigprocmask(SIG_BLOCK, &sigmask, oldset); 449} 450 451static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset) 452{ 453 
rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task)); 454} 455 456static inline void rpc_restore_sigmask(sigset_t *oldset) 457{ 458 sigprocmask(SIG_SETMASK, oldset, NULL); 459} 460 461void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset) 462{ 463 rpc_save_sigmask(oldset, clnt->cl_intr); 464} 465 466void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset) 467{ 468 rpc_restore_sigmask(oldset); 469} 470 471/* 472 * New rpc_call implementation 473 */ 474int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) 475{ 476 struct rpc_task *task; 477 sigset_t oldset; 478 int status; 479 480 /* If this client is slain all further I/O fails */ 481 if (clnt->cl_dead) 482 return -EIO; 483 484 BUG_ON(flags & RPC_TASK_ASYNC); 485 486 task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL); 487 if (task == NULL) 488 return -ENOMEM; 489 490 /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */ 491 rpc_task_sigmask(task, &oldset); 492 493 /* Set up the call info struct and execute the task */ 494 rpc_call_setup(task, msg, 0); 495 if (task->tk_status == 0) { 496 atomic_inc(&task->tk_count); 497 rpc_execute(task); 498 } 499 status = task->tk_status; 500 rpc_put_task(task); 501 rpc_restore_sigmask(&oldset); 502 return status; 503} 504 505/* 506 * New rpc_call implementation 507 */ 508int 509rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, 510 const struct rpc_call_ops *tk_ops, void *data) 511{ 512 struct rpc_task *task; 513 sigset_t oldset; 514 int status; 515 516 /* If this client is slain all further I/O fails */ 517 status = -EIO; 518 if (clnt->cl_dead) 519 goto out_release; 520 521 flags |= RPC_TASK_ASYNC; 522 523 /* Create/initialize a new RPC task */ 524 status = -ENOMEM; 525 if (!(task = rpc_new_task(clnt, flags, tk_ops, data))) 526 goto out_release; 527 528 /* Mask signals on GSS_AUTH upcalls */ 529 rpc_task_sigmask(task, &oldset); 530 531 rpc_call_setup(task, msg, 0); 532 533 /* Set up the call info struct and 
execute the task */ 534 status = task->tk_status; 535 if (status == 0) 536 rpc_execute(task); 537 else 538 rpc_put_task(task); 539 540 rpc_restore_sigmask(&oldset); 541 return status; 542out_release: 543 rpc_release_calldata(tk_ops, data); 544 return status; 545} 546 547 548void 549rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags) 550{ 551 task->tk_msg = *msg; 552 task->tk_flags |= flags; 553 /* Bind the user cred */ 554 if (task->tk_msg.rpc_cred != NULL) 555 rpcauth_holdcred(task); 556 else 557 rpcauth_bindcred(task); 558 559 if (task->tk_status == 0) 560 task->tk_action = call_start; 561 else 562 task->tk_action = rpc_exit_task; 563} 564 565/** 566 * rpc_peeraddr - extract remote peer address from clnt's xprt 567 * @clnt: RPC client structure 568 * @buf: target buffer 569 * @size: length of target buffer 570 * 571 * Returns the number of bytes that are actually in the stored address. 572 */ 573size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize) 574{ 575 size_t bytes; 576 struct rpc_xprt *xprt = clnt->cl_xprt; 577 578 bytes = sizeof(xprt->addr); 579 if (bytes > bufsize) 580 bytes = bufsize; 581 memcpy(buf, &clnt->cl_xprt->addr, bytes); 582 return xprt->addrlen; 583} 584EXPORT_SYMBOL_GPL(rpc_peeraddr); 585 586/** 587 * rpc_peeraddr2str - return remote peer address in printable format 588 * @clnt: RPC client structure 589 * @format: address format 590 * 591 */ 592char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format) 593{ 594 struct rpc_xprt *xprt = clnt->cl_xprt; 595 596 if (xprt->address_strings[format] != NULL) 597 return xprt->address_strings[format]; 598 else 599 return "unprintable"; 600} 601EXPORT_SYMBOL_GPL(rpc_peeraddr2str); 602 603void 604rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) 605{ 606 struct rpc_xprt *xprt = clnt->cl_xprt; 607 if (xprt->ops->set_buffer_size) 608 xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); 609} 610 611/* 612 * 
Return size of largest payload RPC client can support, in bytes 613 * 614 * For stream transports, this is one RPC record fragment (see RFC 615 * 1831), as we don't support multi-record requests yet. For datagram 616 * transports, this is the size of an IP packet minus the IP, UDP, and 617 * RPC header sizes. 618 */ 619size_t rpc_max_payload(struct rpc_clnt *clnt) 620{ 621 return clnt->cl_xprt->max_payload; 622} 623EXPORT_SYMBOL_GPL(rpc_max_payload); 624 625/** 626 * rpc_force_rebind - force transport to check that remote port is unchanged 627 * @clnt: client to rebind 628 * 629 */ 630void rpc_force_rebind(struct rpc_clnt *clnt) 631{ 632 if (clnt->cl_autobind) 633 xprt_clear_bound(clnt->cl_xprt); 634} 635EXPORT_SYMBOL_GPL(rpc_force_rebind); 636 637/* 638 * Restart an (async) RPC call. Usually called from within the 639 * exit handler. 640 */ 641void 642rpc_restart_call(struct rpc_task *task) 643{ 644 if (RPC_ASSASSINATED(task)) 645 return; 646 647 task->tk_action = call_start; 648} 649 650/* 651 * 0. Initial state 652 * 653 * Other FSM states can be visited zero or more times, but 654 * this state is visited exactly once for each RPC. 655 */ 656static void 657call_start(struct rpc_task *task) 658{ 659 struct rpc_clnt *clnt = task->tk_client; 660 661 dprintk("RPC: %5u call_start %s%d proc %d (%s)\n", task->tk_pid, 662 clnt->cl_protname, clnt->cl_vers, 663 task->tk_msg.rpc_proc->p_proc, 664 (RPC_IS_ASYNC(task) ? "async" : "sync")); 665 666 /* Increment call count */ 667 task->tk_msg.rpc_proc->p_count++; 668 clnt->cl_stats->rpccnt++; 669 task->tk_action = call_reserve; 670} 671 672/* 673 * 1. Reserve an RPC call slot 674 */ 675static void 676call_reserve(struct rpc_task *task) 677{ 678 dprint_status(task); 679 680 if (!rpcauth_uptodatecred(task)) { 681 task->tk_action = call_refresh; 682 return; 683 } 684 685 task->tk_status = 0; 686 task->tk_action = call_reserveresult; 687 xprt_reserve(task); 688} 689 690/* 691 * 1b. 
Grok the result of xprt_reserve() 692 */ 693static void 694call_reserveresult(struct rpc_task *task) 695{ 696 int status = task->tk_status; 697 698 dprint_status(task); 699 700 /* 701 * After a call to xprt_reserve(), we must have either 702 * a request slot or else an error status. 703 */ 704 task->tk_status = 0; 705 if (status >= 0) { 706 if (task->tk_rqstp) { 707 task->tk_action = call_allocate; 708 return; 709 } 710 711 printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n", 712 __FUNCTION__, status); 713 rpc_exit(task, -EIO); 714 return; 715 } 716 717 /* 718 * Even though there was an error, we may have acquired 719 * a request slot somehow. Make sure not to leak it. 720 */ 721 if (task->tk_rqstp) { 722 printk(KERN_ERR "%s: status=%d, request allocated anyway\n", 723 __FUNCTION__, status); 724 xprt_release(task); 725 } 726 727 switch (status) { 728 case -EAGAIN: /* woken up; retry */ 729 task->tk_action = call_reserve; 730 return; 731 case -EIO: /* probably a shutdown */ 732 break; 733 default: 734 printk(KERN_ERR "%s: unrecognized error %d, exiting\n", 735 __FUNCTION__, status); 736 break; 737 } 738 rpc_exit(task, status); 739} 740 741/* 742 * 2. Allocate the buffer. For details, see sched.c:rpc_malloc. 743 * (Note: buffer memory is freed in xprt_release). 744 */ 745static void 746call_allocate(struct rpc_task *task) 747{ 748 unsigned int slack = task->tk_auth->au_cslack; 749 struct rpc_rqst *req = task->tk_rqstp; 750 struct rpc_xprt *xprt = task->tk_xprt; 751 struct rpc_procinfo *proc = task->tk_msg.rpc_proc; 752 753 dprint_status(task); 754 755 task->tk_status = 0; 756 task->tk_action = call_bind; 757 758 if (req->rq_buffer) 759 return; 760 761 if (proc->p_proc != 0) { 762 BUG_ON(proc->p_arglen == 0); 763 if (proc->p_decode != NULL) 764 BUG_ON(proc->p_replen == 0); 765 } 766 767 /* 768 * Calculate the size (in quads) of the RPC call 769 * and reply headers, and convert both values 770 * to byte sizes. 
771 */ 772 req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen; 773 req->rq_callsize <<= 2; 774 req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen; 775 req->rq_rcvsize <<= 2; 776 777 req->rq_buffer = xprt->ops->buf_alloc(task, 778 req->rq_callsize + req->rq_rcvsize); 779 if (req->rq_buffer != NULL) 780 return; 781 782 dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid); 783 784 if (RPC_IS_ASYNC(task) || !signalled()) { 785 xprt_release(task); 786 task->tk_action = call_reserve; 787 rpc_delay(task, HZ>>4); 788 return; 789 } 790 791 rpc_exit(task, -ERESTARTSYS); 792} 793 794static inline int 795rpc_task_need_encode(struct rpc_task *task) 796{ 797 return task->tk_rqstp->rq_snd_buf.len == 0; 798} 799 800static inline void 801rpc_task_force_reencode(struct rpc_task *task) 802{ 803 task->tk_rqstp->rq_snd_buf.len = 0; 804} 805 806static inline void 807rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) 808{ 809 buf->head[0].iov_base = start; 810 buf->head[0].iov_len = len; 811 buf->tail[0].iov_len = 0; 812 buf->page_len = 0; 813 buf->len = 0; 814 buf->buflen = len; 815} 816 817/* 818 * 3. 
Encode arguments of an RPC call 819 */ 820static void 821call_encode(struct rpc_task *task) 822{ 823 struct rpc_rqst *req = task->tk_rqstp; 824 kxdrproc_t encode; 825 __be32 *p; 826 827 dprint_status(task); 828 829 rpc_xdr_buf_init(&req->rq_snd_buf, 830 req->rq_buffer, 831 req->rq_callsize); 832 rpc_xdr_buf_init(&req->rq_rcv_buf, 833 (char *)req->rq_buffer + req->rq_callsize, 834 req->rq_rcvsize); 835 836 /* Encode header and provided arguments */ 837 encode = task->tk_msg.rpc_proc->p_encode; 838 if (!(p = call_header(task))) { 839 printk(KERN_INFO "RPC: call_header failed, exit EIO\n"); 840 rpc_exit(task, -EIO); 841 return; 842 } 843 if (encode == NULL) 844 return; 845 846 lock_kernel(); 847 task->tk_status = rpcauth_wrap_req(task, encode, req, p, 848 task->tk_msg.rpc_argp); 849 unlock_kernel(); 850 if (task->tk_status == -ENOMEM) { 851 rpc_delay(task, 3*HZ); 852 task->tk_status = -EAGAIN; 853 } 854} 855 856/* 857 * 4. Get the server port number if not yet set 858 */ 859static void 860call_bind(struct rpc_task *task) 861{ 862 struct rpc_xprt *xprt = task->tk_xprt; 863 864 dprint_status(task); 865 866 task->tk_action = call_connect; 867 if (!xprt_bound(xprt)) { 868 task->tk_action = call_bind_status; 869 task->tk_timeout = xprt->bind_timeout; 870 xprt->ops->rpcbind(task); 871 } 872} 873 874/* 875 * 4a. 
Sort out bind result 876 */ 877static void 878call_bind_status(struct rpc_task *task) 879{ 880 int status = -EACCES; 881 882 if (task->tk_status >= 0) { 883 dprint_status(task); 884 task->tk_status = 0; 885 task->tk_action = call_connect; 886 return; 887 } 888 889 switch (task->tk_status) { 890 case -EACCES: 891 dprintk("RPC: %5u remote rpcbind: RPC program/version " 892 "unavailable\n", task->tk_pid); 893 rpc_delay(task, 3*HZ); 894 goto retry_timeout; 895 case -ETIMEDOUT: 896 dprintk("RPC: %5u rpcbind request timed out\n", 897 task->tk_pid); 898 goto retry_timeout; 899 case -EPFNOSUPPORT: 900 dprintk("RPC: %5u remote rpcbind service unavailable\n", 901 task->tk_pid); 902 break; 903 case -EPROTONOSUPPORT: 904 dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n", 905 task->tk_pid); 906 task->tk_status = 0; 907 task->tk_action = call_bind; 908 return; 909 default: 910 dprintk("RPC: %5u unrecognized rpcbind error (%d)\n", 911 task->tk_pid, -task->tk_status); 912 status = -EIO; 913 } 914 915 rpc_exit(task, status); 916 return; 917 918retry_timeout: 919 task->tk_action = call_timeout; 920} 921 922/* 923 * 4b. Connect to the RPC server 924 */ 925static void 926call_connect(struct rpc_task *task) 927{ 928 struct rpc_xprt *xprt = task->tk_xprt; 929 930 dprintk("RPC: %5u call_connect xprt %p %s connected\n", 931 task->tk_pid, xprt, 932 (xprt_connected(xprt) ? "is" : "is not")); 933 934 task->tk_action = call_transmit; 935 if (!xprt_connected(xprt)) { 936 task->tk_action = call_connect_status; 937 if (task->tk_status < 0) 938 return; 939 xprt_connect(task); 940 } 941} 942 943/* 944 * 4c. 
Sort out connect result 945 */ 946static void 947call_connect_status(struct rpc_task *task) 948{ 949 struct rpc_clnt *clnt = task->tk_client; 950 int status = task->tk_status; 951 952 dprint_status(task); 953 954 task->tk_status = 0; 955 if (status >= 0) { 956 clnt->cl_stats->netreconn++; 957 task->tk_action = call_transmit; 958 return; 959 } 960 961 /* Something failed: remote service port may have changed */ 962 rpc_force_rebind(clnt); 963 964 switch (status) { 965 case -ENOTCONN: 966 case -EAGAIN: 967 task->tk_action = call_bind; 968 if (!RPC_IS_SOFT(task)) 969 return; 970 /* if soft mounted, test if we've timed out */ 971 case -ETIMEDOUT: 972 task->tk_action = call_timeout; 973 return; 974 } 975 rpc_exit(task, -EIO); 976} 977 978/* 979 * 5. Transmit the RPC request, and wait for reply 980 */ 981static void 982call_transmit(struct rpc_task *task) 983{ 984 dprint_status(task); 985 986 task->tk_action = call_status; 987 if (task->tk_status < 0) 988 return; 989 task->tk_status = xprt_prepare_transmit(task); 990 if (task->tk_status != 0) 991 return; 992 task->tk_action = call_transmit_status; 993 /* Encode here so that rpcsec_gss can use correct sequence number. */ 994 if (rpc_task_need_encode(task)) { 995 BUG_ON(task->tk_rqstp->rq_bytes_sent != 0); 996 call_encode(task); 997 /* Did the encode result in an error condition? */ 998 if (task->tk_status != 0) 999 return; 1000 } 1001 xprt_transmit(task); 1002 if (task->tk_status < 0) 1003 return; 1004 /* 1005 * On success, ensure that we call xprt_end_transmit() before sleeping 1006 * in order to allow access to the socket to other RPC requests. 1007 */ 1008 call_transmit_status(task); 1009 if (task->tk_msg.rpc_proc->p_decode != NULL) 1010 return; 1011 task->tk_action = rpc_exit_task; 1012 rpc_wake_up_task(task); 1013} 1014 1015/* 1016 * 5a. 
Handle cleanup after a transmission 1017 */ 1018static void 1019call_transmit_status(struct rpc_task *task) 1020{ 1021 task->tk_action = call_status; 1022 /* 1023 * Special case: if we've been waiting on the socket's write_space() 1024 * callback, then don't call xprt_end_transmit(). 1025 */ 1026 if (task->tk_status == -EAGAIN) 1027 return; 1028 xprt_end_transmit(task); 1029 rpc_task_force_reencode(task); 1030} 1031 1032/* 1033 * 6. Sort out the RPC call status 1034 */ 1035static void 1036call_status(struct rpc_task *task) 1037{ 1038 struct rpc_clnt *clnt = task->tk_client; 1039 struct rpc_rqst *req = task->tk_rqstp; 1040 int status; 1041 1042 if (req->rq_received > 0 && !req->rq_bytes_sent) 1043 task->tk_status = req->rq_received; 1044 1045 dprint_status(task); 1046 1047 status = task->tk_status; 1048 if (status >= 0) { 1049 task->tk_action = call_decode; 1050 return; 1051 } 1052 1053 task->tk_status = 0; 1054 switch(status) { 1055 case -EHOSTDOWN: 1056 case -EHOSTUNREACH: 1057 case -ENETUNREACH: 1058 /* 1059 * Delay any retries for 3 seconds, then handle as if it 1060 * were a timeout. 1061 */ 1062 rpc_delay(task, 3*HZ); 1063 case -ETIMEDOUT: 1064 task->tk_action = call_timeout; 1065 if (task->tk_client->cl_discrtry) 1066 xprt_disconnect(task->tk_xprt); 1067 break; 1068 case -ECONNREFUSED: 1069 case -ENOTCONN: 1070 rpc_force_rebind(clnt); 1071 task->tk_action = call_bind; 1072 break; 1073 case -EAGAIN: 1074 task->tk_action = call_transmit; 1075 break; 1076 case -EIO: 1077 /* shutdown or soft timeout */ 1078 rpc_exit(task, status); 1079 break; 1080 default: 1081 printk("%s: RPC call returned error %d\n", 1082 clnt->cl_protname, -status); 1083 rpc_exit(task, status); 1084 } 1085} 1086 1087/* 1088 * 6a. Handle RPC timeout 1089 * We do not release the request slot, so we keep using the 1090 * same XID for all retransmits. 
1091 */ 1092static void 1093call_timeout(struct rpc_task *task) 1094{ 1095 struct rpc_clnt *clnt = task->tk_client; 1096 1097 if (xprt_adjust_timeout(task->tk_rqstp) == 0) { 1098 dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid); 1099 goto retry; 1100 } 1101 1102 dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); 1103 task->tk_timeouts++; 1104 1105 if (RPC_IS_SOFT(task)) { 1106 printk(KERN_NOTICE "%s: server %s not responding, timed out\n", 1107 clnt->cl_protname, clnt->cl_server); 1108 rpc_exit(task, -EIO); 1109 return; 1110 } 1111 1112 if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { 1113 task->tk_flags |= RPC_CALL_MAJORSEEN; 1114 printk(KERN_NOTICE "%s: server %s not responding, still trying\n", 1115 clnt->cl_protname, clnt->cl_server); 1116 } 1117 rpc_force_rebind(clnt); 1118 1119retry: 1120 clnt->cl_stats->rpcretrans++; 1121 task->tk_action = call_bind; 1122 task->tk_status = 0; 1123} 1124 1125/* 1126 * 7. Decode the RPC reply 1127 */ 1128static void 1129call_decode(struct rpc_task *task) 1130{ 1131 struct rpc_clnt *clnt = task->tk_client; 1132 struct rpc_rqst *req = task->tk_rqstp; 1133 kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode; 1134 __be32 *p; 1135 1136 dprintk("RPC: %5u call_decode (status %d)\n", 1137 task->tk_pid, task->tk_status); 1138 1139 if (task->tk_flags & RPC_CALL_MAJORSEEN) { 1140 printk(KERN_NOTICE "%s: server %s OK\n", 1141 clnt->cl_protname, clnt->cl_server); 1142 task->tk_flags &= ~RPC_CALL_MAJORSEEN; 1143 } 1144 1145 if (task->tk_status < 12) { 1146 if (!RPC_IS_SOFT(task)) { 1147 task->tk_action = call_bind; 1148 clnt->cl_stats->rpcretrans++; 1149 goto out_retry; 1150 } 1151 dprintk("RPC: %s: too small RPC reply size (%d bytes)\n", 1152 clnt->cl_protname, task->tk_status); 1153 task->tk_action = call_timeout; 1154 goto out_retry; 1155 } 1156 1157 /* 1158 * Ensure that we see all writes made by xprt_complete_rqst() 1159 * before it changed req->rq_received. 
1160 */ 1161 smp_rmb(); 1162 req->rq_rcv_buf.len = req->rq_private_buf.len; 1163 1164 /* Check that the softirq receive buffer is valid */ 1165 WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf, 1166 sizeof(req->rq_rcv_buf)) != 0); 1167 1168 /* Verify the RPC header */ 1169 p = call_verify(task); 1170 if (IS_ERR(p)) { 1171 if (p == ERR_PTR(-EAGAIN)) 1172 goto out_retry; 1173 return; 1174 } 1175 1176 task->tk_action = rpc_exit_task; 1177 1178 if (decode) { 1179 lock_kernel(); 1180 task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, 1181 task->tk_msg.rpc_resp); 1182 unlock_kernel(); 1183 } 1184 dprintk("RPC: %5u call_decode result %d\n", task->tk_pid, 1185 task->tk_status); 1186 return; 1187out_retry: 1188 req->rq_received = req->rq_private_buf.len = 0; 1189 task->tk_status = 0; 1190 if (task->tk_client->cl_discrtry) 1191 xprt_disconnect(task->tk_xprt); 1192} 1193 1194/* 1195 * 8. Refresh the credentials if rejected by the server 1196 */ 1197static void 1198call_refresh(struct rpc_task *task) 1199{ 1200 dprint_status(task); 1201 1202 xprt_release(task); /* Must do to obtain new XID */ 1203 task->tk_action = call_refreshresult; 1204 task->tk_status = 0; 1205 task->tk_client->cl_stats->rpcauthrefresh++; 1206 rpcauth_refreshcred(task); 1207} 1208 1209/* 1210 * 8a. 
 * Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	/* Refresh succeeded and the cred is current: resume the call by
	 * reserving a transport slot again. */
	if (status >= 0 && rpcauth_uptodatecred(task))
		return;
	/* Permanent authentication failure: report it to the caller. */
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
		return;
	}
	/* Transient failure: retry the refresh, delaying a little unless
	 * the refresh itself timed out. */
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
	return;
}

/*
 * Call header serialization
 */
static __be32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	__be32		*p = req->rq_svec[0].iov_base;

	/* Marshal the fixed RPC call header into the head of the send
	 * buffer and return a pointer just past the credential. */
	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}

/*
 * Reply header verification
 */
static __be32 *
call_verify(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	/* len counts the remaining 32-bit XDR words in the reply. */
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	__be32	*p = iov->iov_base;
	u32 n;
	int error = -EACCES;

	if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) {
		/* RFC-1014 says that the representation of XDR data must be a
		 * multiple of four bytes
		 * - if it isn't pointer subtraction in the NFS client may give
		 *   undefined results
		 */
		printk(KERN_WARNING
		       "call_verify: XDR representation not a multiple of"
		       " 4 bytes: 0x%x\n", task->tk_rqstp->rq_rcv_buf.len);
		goto out_eio;
	}
	/* Need at least xid + direction + reply status. */
	if ((len -= 3) < 0)
		goto out_overflow;
	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
		goto out_garbage;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		/* The call was rejected: read the reject status word. */
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			/* fall through to the auth-error switch below */
			break;
		case RPC_MISMATCH:
			dprintk("RPC: %5u %s: RPC call version "
				"mismatch!\n",
				task->tk_pid, __FUNCTION__);
			error = -EPROTONOSUPPORT;
			goto out_err;
		default:
			dprintk("RPC: %5u %s: RPC call rejected, "
				"unknown error: %x\n",
				task->tk_pid, __FUNCTION__, n);
			goto out_eio;
		}
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			/* Stale credential: refresh and retry, up to
			 * tk_cred_retry times. */
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %5u %s: retry stale creds\n",
					task->tk_pid, __FUNCTION__);
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
			goto out_retry;
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %5u %s: retry garbled creds\n",
					task->tk_pid, __FUNCTION__);
			task->tk_action = call_bind;
			goto out_retry;
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server %s requires stronger "
			       "authentication.\n", task->tk_client->cl_server);
			break;
		default:
			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
			error = -EIO;
		}
		dprintk("RPC: %5u %s: call rejected %d\n",
				task->tk_pid, __FUNCTION__, n);
		goto out_err;
	}
	/* Verify the server's verifier; on success p advances past it. */
	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto out_garbage;		/* bad verifier, retry */
	}
	/* NOTE(review): sanity check that the verifier didn't walk p back
	 * before the buffer start -- a negative count means corruption. */
	len = p - (__be32 *)iov->iov_base - 1;
	if (len < 0)
		goto out_overflow;
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: %5u %s: program %u is unsupported by server %s\n",
				task->tk_pid, __FUNCTION__,
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: %5u %s: program %u, version %u unsupported by "
				"server %s\n", task->tk_pid, __FUNCTION__,
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: %5u %s: proc %p unsupported by program %u, "
				"version %u on server %s\n",
				task->tk_pid, __FUNCTION__,
				task->tk_msg.rpc_proc,
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %5u %s: server saw garbage\n",
				task->tk_pid, __FUNCTION__);
		break;			/* retry */
	default:
		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
		/* Also retry */
	}

out_garbage:
	/* Garbled reply: retransmit, up to tk_garb_retry times. */
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC: %5u %s: retrying\n",
				task->tk_pid, __FUNCTION__);
		task->tk_action = call_bind;
out_retry:
		/* Caller (call_decode) treats -EAGAIN as "retry was set up". */
		return ERR_PTR(-EAGAIN);
	}
	printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__);
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	return ERR_PTR(error);
out_overflow:
	printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
	goto out_garbage;
}

/* XDR encode stub for the NULL procedure: nothing to marshal. */
static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
{
	return 0;
}

/* XDR decode stub for the NULL procedure: nothing to unmarshal. */
static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
{
	return 0;
}

/* Procedure 0 (NULL) descriptor, used by rpc_ping() below. */
static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

/*
 * Test whether the server is reachable by issuing a synchronous NULL
 * procedure call with AUTH_NULL credentials.  Returns the rpc_call_sync()
 * status (0 on success, negative errno on failure).
 */
int rpc_ping(struct rpc_clnt *clnt, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;
	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);
	return err;
}