Search limited to /netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/fs/ocfs2/dlm/

Lines Matching defs:to

21  * License along with this program; if not, write to the
208 struct dlm_master_list_entry *mle, int to);
270 * in order to avoid sleeping and allocation that occurs in
271 * heartbeat, master list entries are simply attached to the
275 * by the mle. the mle needs to be detached from the
277 * longer useful to the mle, and before the mle is freed.
412 /* attach the mle to the domain node up/down events */
530 * if this is bad, we can move this to a freelist. */
597 "Going to BUG for resource %.*s."
611 /* By the time we're ready to blow this guy away, we shouldn't
637 /* If we memset here, we lose our reference to the kmalloc'd
638 * res->lockname.name, so be sure to init every field
739 * also, do a lookup in the dlm->master_list to see
742 * for this name, and we should *not* attempt to master
743 * the lock here. need to wait around for that node
744 * to assert_master (or die).
784 /* wait until done messaging the master, drop our ref to allow
785 * the lockres to be purged, start over. */
805 /* nothing found and we need to allocate one. */
819 /* caller knows it's safe to assume it's not mastered elsewhere
831 /* check master list to see if another node has started mastering it */
834 /* if we found a block, wait for lock to be mastered by another node */
850 * either way, go back to the top and start over. */
873 /* go ahead and try to master lock on this node */
882 * to see if there are any nodes that still need to be
887 mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to"
895 * DLM_MLE_MASTER on the master list, so it's safe to add the
896 * lockres to the hashtable. anyone who finds the lock will
897 * still have to wait on the IN_PROGRESS. */
899 /* finally add the lockres to its hash bucket */
904 /* if this node does not become the master make sure to drop
909 * if so, the creator of the BLOCK may try to put the last
911 * need an extra one to keep from a bad ptr deref. */
920 * so we only need to clear out the recovery map once. */
941 mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to"
953 /* must wait for lock to be mastered elsewhere */
969 * master will know that asserts are needed back to
971 mlog(0, "%s:%.*s: requests only up to %u but master "
1017 /* need to free the unused mle */
1047 /* this will cause the master to re-assert across
1052 /* give recovery a chance to run */
1053 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
1153 mlog(0, "about to master %.*s here, this=%u\n",
1158 * not in the response to the assert_master
1161 * due to node death. */
1164 /* no longer need to restart lock mastery.
1250 "master process may need to be restarted!\n");
1264 mlog(0, "sending request to new node\n");
1292 * there is now nothing left to
1293 * block on. we need to return
1300 "longer blocking. try to "
1315 /* reset the vote_map to the current node_map */
1340 struct dlm_master_list_entry *mle, int to)
1362 sizeof(request), to, &response);
1369 mlog(ML_ERROR, "bad args passed to o2net!\n");
1372 mlog(ML_ERROR, "out of memory while trying to send "
1385 mlog(ML_ERROR, "link to %d went down!\n", to);
1394 set_bit(to, mle->response_map);
1395 mlog(0, "node %u is the master, response=YES\n", to);
1398 res->lockname.name, to);
1399 mle->master = to;
1402 mlog(0, "node %u not master, response=NO\n", to);
1403 set_bit(to, mle->response_map);
1406 mlog(0, "node %u not master, response=MAYBE\n", to);
1407 set_bit(to, mle->response_map);
1408 set_bit(to, mle->maybe_map);
1411 mlog(0, "node %u hit an error, resending\n", to);
1499 * there is some extra work that needs to
1501 * caused all nodes up to this one to
1502 * create mles. this node now needs to
1516 * being blocked, or it is actively trying to
1535 // "lockres to be mastered\n");
1538 mlog(0, "node %u is master, but trying to migrate to "
1542 "node is trying to migrate it to %u?!\n",
1564 // mlog(0, "this node is attempting to "
1575 /* keep the mle attached to heartbeat events */
1638 /* keep the mle attached to heartbeat events */
1647 mlog(ML_ERROR, "bad lockres while trying to assert!\n");
1655 mlog(ML_ERROR, "failed to dispatch assert master work\n");
1679 int to, tmpret;
1697 while ((to = dlm_node_iter_next(&iter)) >= 0) {
1701 mlog(0, "sending assert master to %d (%.*s)\n", to,
1710 &assert, sizeof(assert), to, &r);
1718 mlog(0, "link to %d went down!\n", to);
1724 mlog(ML_ERROR,"during assert master of %.*s to %u, "
1725 "got %d.\n", namelen, lockname, to, r);
1742 namelen, lockname, to);
1748 namelen, lockname, to);
1752 mlog(0, "%.*s: node %u has a reference to this "
1754 namelen, lockname, to);
1756 dlm_lockres_set_refmap_bit(to, res);
1834 * YES to mastery requests, but this node
1864 * now check to see if there is a lockres */
1935 * then the calling node needs to re-assert to clear
1953 "from %u to %u\n",
2024 mlog(0, "need to tell master to reassert\n");
2034 /* let the master know we have a reference to the lockres */
2121 /* if is this just to clear up mles for nodes below
2122 * this node, do not send the message to the original
2136 * If we're migrating this lock to someone else, we are no
2137 * longer allowed to assert our own mastery. OTOH, we need to
2143 mlog(0, "Someone asked us to assert mastery, but we're "
2154 mlog(0, "worker about to master %.*s here, this=%u\n",
2158 /* no need to restart, we are done */
2173 * We cannot wait for node recovery to complete to begin mastering this
2174 * lockres because this lockres is used to kick off recovery! ;-)
2175 * So, do a pre-check on all living nodes to see if any of those nodes
2177 * we wait a short time to allow that node to get notified by its own
2180 * fired, so we can know for sure that it is safe to continue once
2195 /* do not send to self */
2209 /* check to see if this master is in the recovery map */
2243 mlog(0, "%s:%.*s: sending deref to %d\n",
2324 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2384 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2394 * if not. If 0, numlocks is set to the number of locks in the lockres.
2470 mlog(0, "migrating %.*s to %u\n", namelen, name, target);
2483 /* no work to do */
2510 * find a node to migrate the lockres to
2537 * add the migration mle to the list
2553 * if we fail after this we need to re-dirty the lockres
2556 mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2606 * this serves as notice to the target node that a
2612 mlog(0, "migration to node %u failed with %d\n",
2625 /* at this point, the target sends a message to all nodes,
2627 * we had to put an mle in the list to begin the process. this
2628 * node now waits for target to do an assert master. this node
2633 * mle and sets the master to UNKNOWN. */
2636 /* wait for new node to assert master */
2650 * to a node which also goes down */
2713 * Called with the dlm spinlock held, may drop it to do migration, but
2727 "trying to free this but locks remain\n",
2734 /* No need to migrate a lockres having no locks */
2811 mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2814 /* need to set MIGRATING flag on lockres. this is done by
2819 /* strategy is to reserve an extra ast then release
2827 * try to dirty the lockres before MIGRATING is set */
2836 mlog(0, "about to wait on migration_wq, dirty=%s\n",
2839 * will pass thru immediately. otherwise, we need to wait
2840 * for the last ast to finish. */
2859 * the unneeded state which blocked threads trying to DIRTY */
2880 * o all processes trying to reserve an ast on this
2881 * lockres must wait for the MIGRATING flag to clear
2887 * original master calls this to free all of the dlm_lock
2888 * structures that used to be for other nodes. */
2927 mlog(0, "%s:%.*s: node %u had a ref to this "
2937 * need stats to make this do the right thing.
2954 /* up to the caller to make sure this node
2981 mlog(0, "giving up. no master to migrate to\n");
3006 /* send message to all nodes, except the master and myself */
3044 * since the process that put the mle on the list still has a reference to it,
3046 * we will have no mle in the list to start with. now we can add an mle for
3086 * dead. what can we do here? drop it to the floor? */
3152 /* ah another process raced me to it */
3153 mlog(0, "tried to migrate %.*s, but some "
3154 "process beat me to it\n",
3158 /* bad. 2 NODES are trying to migrate! */
3180 "telling master to get ref for cleared out mle "
3187 /* now add a migration mle to the tail of the list */
3229 * need to clean up if the dead node would have
3264 * are sent to whichever node becomes the recovery
3267 * this lockres, or if he needs to take over
3269 * another message to resolve this. */
3274 /* if we have reached this point, this mle needs to
3287 "%u to %u!\n", dlm->name, dead_node,
3290 * mle, find it and set its owner to UNKNOWN */
3296 * lock ordering is messed. we need to drop
3298 * lockres lock, meaning that we will have to
3310 /* about to get rid of mle, detach from heartbeat */
3348 mlog(0, "now time to do a migrate request to other nodes\n");
3356 mlog(0, "doing assert master of %.*s to all except the original node\n",
3363 /* no longer need to retry. all living nodes contacted. */
3370 mlog(0, "doing assert master of %.*s back to %u\n",
3375 mlog(0, "assert master to original master failed "
3396 * this is integral to migration
3399 /* for future intent to call an ast, reserve one ahead of time.
3415 * used to drop the reserved ast, either because it went unused,
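
The matches around source lines 1340-1411 outline how a master request is sent to each node and how each reply is folded into per-node bitmaps (response_map, maybe_map) before a master is chosen. Below is a minimal, self-contained sketch of that bookkeeping pattern, not the ocfs2 code itself; the type names, response codes, and 32-node limit are assumptions made purely for illustration.

/*
 * Minimal sketch (not the ocfs2 code) of the response-map idea seen
 * around lines 1340-1411: a master request goes out to each node, and
 * the reply is recorded in per-node bitmaps so the caller can later
 * decide whether mastery was granted, denied, or is still unknown.
 */
#include <stdio.h>

enum master_response { RESP_YES, RESP_NO, RESP_MAYBE, RESP_ERROR };

struct master_entry {
	unsigned long response_map; /* nodes that answered at all     */
	unsigned long maybe_map;    /* nodes that might hold the lock */
	int master;                 /* node id of the master, or -1   */
};

/* Fold one node's reply into the entry, mirroring the YES/NO/MAYBE
 * handling in the listing; returns 1 if the request must be resent. */
static int record_response(struct master_entry *mle, int to,
			   enum master_response resp)
{
	switch (resp) {
	case RESP_YES:
		mle->response_map |= 1UL << to;
		mle->master = to;
		return 0;
	case RESP_NO:
		mle->response_map |= 1UL << to;
		return 0;
	case RESP_MAYBE:
		mle->response_map |= 1UL << to;
		mle->maybe_map |= 1UL << to;
		return 0;
	case RESP_ERROR:
	default:
		return 1; /* caller should resend to this node */
	}
}

int main(void)
{
	struct master_entry mle = { .master = -1 };

	record_response(&mle, 3, RESP_NO);
	record_response(&mle, 5, RESP_MAYBE);
	record_response(&mle, 7, RESP_YES);

	printf("master=%d response_map=%#lx maybe_map=%#lx\n",
	       mle.master, mle.response_map, mle.maybe_map);
	return 0;
}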