Lines matching refs:state in spa.c

43  * This file contains all the routines used when modifying on-disk SPA state.
1021 * SPA state manipulation (open/create/destroy/import/export)
1847 * in the CLOSED state. This will prep the pool before open/creation/import.
2149 * zdb opens both the current state of the pool and the
2150 * checkpointed state (if present), with a different spa_t.
2153 * them when we load the checkpointed state of the pool.
2281 * zdb opens both the current state of the pool and the
2282 * checkpointed state (if present), with a different spa_t.
2286 * state of the pool.
3336 /* check each device to see what state it's in */
3361 spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
3366 spa->spa_load_state = state;
3445 uint64_t state = 0;
3462 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
3503 if (state != POOL_STATE_ACTIVE)
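
The three fragments above (file lines 3445-3503) show the config sanity
check early in the load path: the pool state is pulled out of the config
nvlist and anything that is not an active pool is refused. A minimal
sketch of that pattern; the helper name and errno choice are
illustrative, not the actual spa.c code:

    static int
    check_pool_active(nvlist_t *config)     /* hypothetical helper */
    {
            uint64_t state = 0;

            /* An absent or mistyped entry simply leaves state at 0. */
            (void) nvlist_lookup_uint64(config,
                ZPOOL_CONFIG_POOL_STATE, &state);

            /* Only pools recorded as ACTIVE may be loaded this way. */
            if (state != POOL_STATE_ACTIVE)
                    return (SET_ERROR(EINVAL));

            return (0);
    }
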
3857 * look at its checkpointed state from userland tools like zdb.
4054 * If we are opening the checkpointed state of the pool by
4058 * we are opening the checkpointed state read-only, we have
4098 "state=%u offset=%llu",
4519 * If the state is SPA_LOAD_TRYIMPORT, our objective is
4531 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are
5090 * probing the vdev with a dummy I/O. The state of each vdev will be set
5184 * state with zdb, and importing the checkpointed state to get
5435 * state. When performing an extreme rewind, we verify the whole pool,
5476 NULL, "rewound state to txg=%llu",
5582 spa_load_retry(spa_t *spa, spa_load_state_t state)
5597 return (spa_load(spa, state, SPA_IMPORT_EXISTING));
5602 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
5603 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
5608 spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
5617 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
5626 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
5652 if (state == SPA_LOAD_RECOVER) {
5678 rewind_error = spa_load_retry(spa, state);
5684 if (config && (rewind_error || state != SPA_LOAD_RECOVER))
5689 if (state == SPA_LOAD_RECOVER) {
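
File lines 5582-5689 are the rewind machinery: spa_load_best() first
tries the newest txg and, only when 'state' is SPA_LOAD_RECOVER, walks
back through older txgs via spa_load_retry(). A simplified sketch of
that control flow (the real function also manages spa_load_max_txg, the
log state, and the saved config; min_txg is abbreviated here):

    load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
    if (load_error == 0)
            return (0);                     /* newest txg loaded fine */

    if (rewind_flags & ZPOOL_NEVER_REWIND)
            return (load_error);            /* caller forbade rewind */

    /* Step back one txg at a time until a load succeeds or we run out. */
    while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg)
            rewind_error = spa_load_retry(spa, state);

    /* Only SPA_LOAD_RECOVER actually commits to the rewound txg. */
    return (state == SPA_LOAD_RECOVER ? rewind_error : load_error);
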
5713 * POOL_STATE_UNINITIALIZED state.
5717 * ambiguous state.
5724 spa_load_state_t state = SPA_LOAD_OPEN;
5756 state = SPA_LOAD_RECOVER;
5760 if (state != SPA_LOAD_RECOVER)
5765 error = spa_load_best(spa, state, policy.zlp_txg,
5788 * information: the state of each vdev after the
5816 if (state == SPA_LOAD_RECOVER && config != NULL) {
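
File lines 5713-5816 are the open path: an ordinary open starts out as
SPA_LOAD_OPEN and is upgraded to SPA_LOAD_RECOVER only when the caller's
load policy asks for a rewind. Reconstructed from the fragments above
(error handling omitted), the selection looks roughly like:

    spa_load_state_t state = SPA_LOAD_OPEN;
    zpool_load_policy_t policy;

    zpool_get_load_policy(nvpolicy, &policy);
    if (policy.zlp_rewind & ZPOOL_DO_REWIND)
            state = SPA_LOAD_RECOVER;

    /* A plain open must not inherit a stale rewind target. */
    if (state != SPA_LOAD_RECOVER)
            spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;

    error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
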
6634 spa_load_state_t state = SPA_LOAD_IMPORT;
6688 state = SPA_LOAD_RECOVER;
6692 if (state != SPA_LOAD_RECOVER) {
6699 error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
6815 uint64_t state;
6822 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
6876 fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, state);
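
File lines 6815-6876 are from spa_tryimport(): the candidate config must
already carry a pool state, and after the trial load that same state is
stamped into the freshly generated config handed back to userland. A
condensed sketch (the elided middle performs the actual trial load):

    uint64_t state;

    /* A candidate config with no recorded pool state is unusable. */
    if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
            return (NULL);

    /* ... trial load into a throwaway spa_t ... */

    /* Regenerate the config and carry the discovered state forward. */
    config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
    fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, state);
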
6940 * update the pool state and sync all the labels to disk, removing the
6993 * modify its state. Objsets may be open only because they're dirty,
7512 * attach to a spared vdev child unless the 'isspare' state is
7623 * Reevaluate the parent vdev state.
7916 * Reevaluate the parent vdev state.
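
The identical comments at file lines 7623 and 7916 mark the cleanup step
after an attach or detach reshapes a vdev tree. In vdev terms that
reevaluation is an upward propagation pass; a sketch, assuming the
standard helper applies here:

    /*
     * Recompute this vdev's state from its children and push the
     * result up toward the root vdev.
     */
    vdev_propagate_state(vd);
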
8076 * stopping initialization. The config and state locks are held so that
8077 * we can properly assess the vdev state before we commit to
8101 /* Sync out the initializing state */
8197 * stopping TRIM. The config and state locks are held so that
8198 * we can properly assess the vdev state before we commit to
8222 /* Sync out the TRIM state */
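
File lines 8076-8222 cover starting and stopping initialization and
TRIM, which share one locking shape: the namespace lock pins the pool
for the whole operation, and the config and state locks are taken as
reader so the vdev state can be assessed before committing. A sketch of
that shape (per-vdev locks and error paths omitted):

    mutex_enter(&spa_namespace_lock);
    spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);

    /* ... look up the vdev and assess its state ... */

    spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);

    /* Sync out the new initializing/TRIM state before returning. */
    txg_wait_synced(spa->spa_dsl_pool, 0);
    mutex_exit(&spa_namespace_lock);
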
8443 * state to ACTIVE so that we know to resume initializing or TRIM
8626 * Also potentially update faulted state.
8839 * degraded/faulted state as well as attempt to reopen the
10045 * If there are any pending vdev state changes, convert them
10376 * Remove all cached state. All pools should be closed now,
10623 * in-memory representation of the relevant on-disk state which can be used to
10624 * determine whether or not the activity is in progress. The in-memory state and
10626 * not be suitable for use with a cvar (e.g., some state is protected by the
10630 * When the state is checked, both the activity-specific lock (if there is one)
10638 * activity, updates the state of the activity and then calls
10640 * needs to hold its activity-specific lock when updating the state, and this
10644 * and because it is held when the waiting thread checks the state of the
10646 * the activity state and cv_broadcasts in between the waiting thread's check
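
File lines 10623-10646 describe the activity-wait protocol: an in-memory
mirror of the on-disk state answers "is the activity still running?", a
dedicated lock/condition-variable pair serializes the check against the
update, and the updater broadcasts only after changing state, so no
wakeup can slip in between the waiter's check and its cv_wait. A sketch
with illustrative names (activities_lock, activities_cv,
activity_in_progress, update_activity_state):

    /* Waiting side: the check and the sleep are atomic w.r.t. the lock. */
    mutex_enter(&activities_lock);
    while (activity_in_progress(spa))       /* hypothetical predicate */
            cv_wait(&activities_cv, &activities_lock);
    mutex_exit(&activities_lock);

    /* Updating side: change the state, then wake every waiter. */
    mutex_enter(&activities_lock);
    update_activity_state(spa);             /* hypothetical */
    cv_broadcast(&activities_cv);
    mutex_exit(&activities_lock);
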
10856 /* state manipulation functions */