Lines Matching refs:mmp in /freebsd-13-stable/sys/contrib/openzfs/module/zfs/

26 #include <sys/mmp.h>
80 * the mmp write interval.
89 * the pool imported will suspend the pool if no mmp writes land within
150 * Used to control the frequency of mmp writes which are performed when the
154 * On average an mmp write will be issued for each leaf vdev every
171 * Controls the behavior of the pool when mmp write failures or delays are
174 * When zfs_multihost_fail_intervals = 0, mmp write failures or delays are
181 * without a successful mmp write. This guarantees the activity test will see
182 * mmp writes if the pool is imported. A value of 1 is ignored and treated as
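The comment fragments above describe the two tunables that drive multihost (MMP) protection: on average one mmp write is issued per leaf vdev every zfs_multihost_interval milliseconds, and a pool that goes zfs_multihost_fail_intervals intervals without a successful mmp write is suspended (a value of 1 being treated as 2). A minimal userland sketch of that arithmetic, assuming the usual defaults of 1000 ms and 10 intervals and a hypothetical leaf-vdev count; the driver itself clamps the raw values (e.g. MMP_INTERVAL_OK() at source line 570 below):

    /* Hypothetical, userland-only sketch of the MMP timing math. */
    #include <stdint.h>
    #include <stdio.h>

    #define MSEC2NSEC(ms)   ((uint64_t)(ms) * 1000000ULL)

    int
    main(void)
    {
        uint64_t interval_ms = 1000;   /* assumed zfs_multihost_interval default */
        uint64_t fail_intervals = 10;  /* assumed zfs_multihost_fail_intervals default */
        uint64_t leaves = 4;           /* hypothetical number of leaf vdevs */

        /* A fail_intervals value of 1 is treated as 2 (see the comment above). */
        if (fail_intervals == 1)
            fail_intervals = 2;

        /* One write per leaf per interval, spread evenly across the leaves. */
        uint64_t between_writes_ns = MSEC2NSEC(interval_ms) / leaves;

        /* Window with no successful write after which the pool is suspended. */
        uint64_t fail_window_ns = fail_intervals * MSEC2NSEC(interval_ms);

        printf("an mmp write is issued roughly every %llu ms\n",
            (unsigned long long)(between_writes_ns / 1000000ULL));
        printf("pool suspends after %llu ms without a successful write\n",
            (unsigned long long)(fail_window_ns / 1000000ULL));
        return (0);
    }

With the defaults this works out to a 10 second suspension window on the host that currently has the pool imported.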
195 mmp_thread_t *mmp = &spa->spa_mmp;
197 mutex_init(&mmp->mmp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
198 cv_init(&mmp->mmp_thread_cv, NULL, CV_DEFAULT, NULL);
199 mutex_init(&mmp->mmp_io_lock, NULL, MUTEX_DEFAULT, NULL);
200 mmp->mmp_kstat_id = 1;
206 mmp_thread_t *mmp = &spa->spa_mmp;
208 mutex_destroy(&mmp->mmp_thread_lock);
209 cv_destroy(&mmp->mmp_thread_cv);
210 mutex_destroy(&mmp->mmp_io_lock);
214 mmp_thread_enter(mmp_thread_t *mmp, callb_cpr_t *cpr)
216 CALLB_CPR_INIT(cpr, &mmp->mmp_thread_lock, callb_generic_cpr, FTAG);
217 mutex_enter(&mmp->mmp_thread_lock);
221 mmp_thread_exit(mmp_thread_t *mmp, kthread_t **mpp, callb_cpr_t *cpr)
225 cv_broadcast(&mmp->mmp_thread_cv);
226 CALLB_CPR_EXIT(cpr); /* drops &mmp->mmp_thread_lock */
233 mmp_thread_t *mmp = &spa->spa_mmp;
236 mutex_enter(&mmp->mmp_thread_lock);
237 if (!mmp->mmp_thread) {
238 mmp->mmp_thread = thread_create(NULL, 0, mmp_thread,
243 mutex_exit(&mmp->mmp_thread_lock);
250 mmp_thread_t *mmp = &spa->spa_mmp;
252 mutex_enter(&mmp->mmp_thread_lock);
253 mmp->mmp_thread_exiting = 1;
254 cv_broadcast(&mmp->mmp_thread_cv);
256 while (mmp->mmp_thread) {
257 cv_wait(&mmp->mmp_thread_cv, &mmp->mmp_thread_lock);
259 mutex_exit(&mmp->mmp_thread_lock);
263 ASSERT(mmp->mmp_thread == NULL);
264 mmp->mmp_thread_exiting = 0;
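The mmp_thread_start()/mmp_thread_stop() fragments above use a standard handshake: start creates the thread under mmp_thread_lock only if one is not already running, and stop sets mmp_thread_exiting, broadcasts mmp_thread_cv, then waits until the exiting thread clears mmp->mmp_thread. A rough userland analogue of that handshake using POSIX threads (every name below is hypothetical; the kernel code uses thread_create(), cv_wait() and the CALLB_CPR hooks seen above instead):

    /* Hypothetical pthreads analogue of the start/stop handshake in mmp.c. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>
    #include <unistd.h>

    struct worker {
        pthread_mutex_t lock;     /* plays the role of mmp_thread_lock */
        pthread_cond_t  cv;       /* plays the role of mmp_thread_cv */
        bool            running;  /* stands in for mmp->mmp_thread != NULL */
        bool            exiting;  /* stands in for mmp_thread_exiting */
    };

    static void *
    worker_thread(void *arg)
    {
        struct worker *w = arg;
        struct timespec ts;

        pthread_mutex_lock(&w->lock);
        while (!w->exiting) {
            /* The real loop issues an mmp write here, then sleeps. */
            clock_gettime(CLOCK_REALTIME, &ts);
            ts.tv_sec += 1;
            pthread_cond_timedwait(&w->cv, &w->lock, &ts);
        }
        w->running = false;       /* like mmp_thread_exit() clearing the thread */
        pthread_cond_broadcast(&w->cv);
        pthread_mutex_unlock(&w->lock);
        return (NULL);
    }

    int
    main(void)
    {
        struct worker w = { PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_COND_INITIALIZER, false, false };
        pthread_t tid;

        /* Start: create the thread only if it is not already running. */
        pthread_mutex_lock(&w.lock);
        if (!w.running) {
            w.running = true;
            pthread_create(&tid, NULL, worker_thread, &w);
        }
        pthread_mutex_unlock(&w.lock);

        sleep(2);

        /* Stop: flag, wake, then wait until the worker has really gone. */
        pthread_mutex_lock(&w.lock);
        w.exiting = true;
        pthread_cond_broadcast(&w.cv);
        while (w.running)
            pthread_cond_wait(&w.cv, &w.lock);
        w.exiting = false;
        pthread_mutex_unlock(&w.lock);

        pthread_join(tid, NULL);
        return (0);
    }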
274 * mmp write (if so a new write will also likely block). If there is no usable
349 * If an mmp write was skipped or fails, and we have already waited longer than
416 * in the mmp thread state, used for mmp writes.
421 mmp_thread_t *mmp = &spa->spa_mmp;
423 mutex_enter(&mmp->mmp_io_lock);
424 mmp->mmp_ub = *ub;
425 mmp->mmp_seq = 1;
426 mmp->mmp_ub.ub_timestamp = gethrestime_sec();
428 mutex_exit(&mmp->mmp_io_lock);
440 mmp_thread_t *mmp = &spa->spa_mmp;
454 mutex_enter(&mmp->mmp_io_lock);
468 if (mmp->mmp_skip_error == error) {
469 spa_mmp_history_set_skip(spa, mmp->mmp_kstat_id - 1);
471 mmp->mmp_skip_error = error;
472 spa_mmp_history_add(spa, mmp->mmp_ub.ub_txg,
473 gethrestime_sec(), mmp->mmp_delay, NULL, 0,
474 mmp->mmp_kstat_id++, error);
479 mutex_exit(&mmp->mmp_io_lock);
485 if (mmp->mmp_skip_error != 0) {
486 mmp->mmp_skip_error = 0;
492 if (mmp->mmp_zio_root == NULL)
493 mmp->mmp_zio_root = zio_root(spa, NULL, NULL,
496 if (mmp->mmp_ub.ub_timestamp != gethrestime_sec()) {
502 mmp->mmp_ub.ub_timestamp = gethrestime_sec();
503 mmp->mmp_seq = 1;
506 ub = &mmp->mmp_ub;
508 ub->ub_mmp_delay = mmp->mmp_delay;
509 ub->ub_mmp_config = MMP_SEQ_SET(mmp->mmp_seq) |
514 vd->vdev_mmp_kstat_id = mmp->mmp_kstat_id;
516 zio_t *zio = zio_null(mmp->mmp_zio_root, spa, NULL, NULL, NULL, flags);
521 mmp->mmp_seq++;
522 mmp->mmp_kstat_id++;
523 mutex_exit(&mmp->mmp_io_lock);
530 VDEV_UBERBLOCK_SIZE(vd), mmp_write_done, mmp,
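In the mmp_write_uberblock() fragments above, each outgoing uberblock carries a wall-clock timestamp plus a sequence number packed into ub_mmp_config with MMP_SEQ_SET(): the sequence restarts at 1 whenever the timestamp rolls over to a new second and is incremented after every write, which lets an importing host tell apart writes that land within the same second. A small sketch of just that bookkeeping (hypothetical struct and function names, not the kernel's):

    /* Hypothetical sketch of the per-write timestamp/sequence bookkeeping. */
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    struct mmp_stamp {
        uint64_t timestamp;  /* wall-clock seconds, like ub_timestamp */
        uint16_t seq;        /* like mmp->mmp_seq */
    };

    static void
    stamp_one_write(struct mmp_stamp *s)
    {
        uint64_t now = (uint64_t)time(NULL);  /* stands in for gethrestime_sec() */

        if (s->timestamp != now) {
            s->timestamp = now;   /* new second: restart the sequence */
            s->seq = 1;
        }
        /* The current seq goes out with this write, then is incremented. */
        printf("write: timestamp=%llu seq=%u\n",
            (unsigned long long)s->timestamp, (unsigned int)s->seq);
        s->seq++;
    }

    int
    main(void)
    {
        struct mmp_stamp s = { (uint64_t)time(NULL), 1 };

        for (int i = 0; i < 3; i++)
            stamp_one_write(&s);
        return (0);
    }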
543 mmp_thread_t *mmp = &spa->spa_mmp;
559 mmp_thread_enter(mmp, &cpr);
568 mutex_enter(&mmp->mmp_io_lock);
569 mmp->mmp_last_write = gethrtime();
570 mmp->mmp_delay = MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval));
571 mutex_exit(&mmp->mmp_io_lock);
573 while (!mmp->mmp_thread_exiting) {
638 mutex_enter(&mmp->mmp_io_lock);
639 mmp->mmp_last_write = gethrtime();
640 mmp->mmp_delay = mmp_interval;
641 mutex_exit(&mmp->mmp_io_lock);
649 mutex_enter(&mmp->mmp_io_lock);
650 mmp->mmp_delay = 0;
651 mutex_exit(&mmp->mmp_io_lock);
659 (gethrtime() - mmp->mmp_last_write) > mmp_fail_ns) {
664 (u_longlong_t)mmp->mmp_last_write,
672 NSEC2MSEC(gethrtime() - mmp->mmp_last_write),
687 (void) cv_timedwait_idle_hires(&mmp->mmp_thread_cv,
688 &mmp->mmp_thread_lock, next_time, USEC2NSEC(100),
690 CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock);
694 zio_wait(mmp->mmp_zio_root);
696 mmp->mmp_zio_root = NULL;
697 mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);
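The mmp_thread() fragments above show the other half of the protection: mmp_last_write is primed when the thread starts and refreshed after every successful write, and if the monotonic clock gets more than mmp_fail_ns ahead of it the pool is suspended, while a zfs_multihost_fail_intervals setting of 0 means failures and delays are ignored rather than fatal. Reduced to the bare decision (hypothetical helper, not a kernel function):

    /* Hypothetical reduction of the suspend check made each pass of the loop. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool
    should_suspend(uint64_t now_ns, uint64_t last_write_ns, uint64_t fail_ns)
    {
        if (fail_ns == 0)   /* fail_intervals == 0: never suspend */
            return (false);
        return (now_ns - last_write_ns > fail_ns);
    }

    int
    main(void)
    {
        uint64_t fail_ns = 10ULL * 1000000000ULL;  /* 10 s window */

        printf("%d\n", should_suspend(11000000000ULL, 0, fail_ns)); /* 1: suspend */
        printf("%d\n", should_suspend(5000000000ULL, 0, fail_ns));  /* 0: keep going */
        printf("%d\n", should_suspend(11000000000ULL, 0, 0));       /* 0: ignored */
        return (0);
    }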
704 * Only signal if the pool is active and mmp thread is
710 mmp_thread_t *mmp = &spa->spa_mmp;
712 mutex_enter(&mmp->mmp_thread_lock);
713 if (mmp->mmp_thread)
714 cv_broadcast(&mmp->mmp_thread_cv);
715 mutex_exit(&mmp->mmp_thread_lock);
734 "Milliseconds between mmp writes to each leaf");
738 "Max allowed period without a successful mmp write");