Lines matching defs:osdc — occurrences of the struct ceph_osd_client pointer osdc (apparently the Linux kernel Ceph OSD client, net/ceph/osd_client.c); the number at the start of each line is the source-file line number.

69 static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
71 WARN_ON(!rwsem_is_locked(&osdc->lock));
73 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
75 WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
79 struct ceph_osd_client *osdc = osd->o_osdc;
82 rwsem_is_locked(&osdc->lock)) &&
83 !rwsem_is_wrlocked(&osdc->lock));
90 static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
91 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
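
A minimal sketch of the lock-assertion pattern suggested by the verify_osdc_*() helpers above: cheap WARN_ON()-based checks that osdc->lock is held (shared or exclusive), compiled down to empty stubs when checking is disabled. The my_* names and the MY_OSDC_LOCK_CHECKS switch are hypothetical, and probing for a write lock via down_read_trylock() is an assumption about how such a check can be implemented, not a claim about this file's code.

#include <linux/bug.h>
#include <linux/rwsem.h>
#include <linux/ceph/osd_client.h>

#ifdef MY_OSDC_LOCK_CHECKS			/* hypothetical debug switch */
static inline bool my_rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	/* if we can take it for read, nobody holds it for write */
	if (down_read_trylock(sem)) {
		wrlocked = false;
		up_read(sem);
	}
	return wrlocked;
}

static inline void my_verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));		/* held for read or write */
}

static inline void my_verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!my_rwsem_is_wrlocked(&osdc->lock));	/* held for write */
}
#else
static inline void my_verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void my_verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
#endif
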
567 struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
577 req = mempool_alloc(osdc->req_mempool, gfp_flags);
588 req->r_osdc = osdc;
608 struct ceph_osd_client *osdc = req->r_osdc;
634 msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
651 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
1078 struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
1100 req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
1133 req->r_flags = flags | osdc->client->options->read_from_replica;
1195 static void for_each_request(struct ceph_osd_client *osdc,
1201 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
1214 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
1282 static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
1290 osd->o_osdc = osdc;
1296 ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
1327 struct ceph_osd_client *osdc = osd->o_osdc;
1332 spin_lock(&osdc->osd_lru_lock);
1333 list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
1334 spin_unlock(&osdc->osd_lru_lock);
1336 osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
1348 struct ceph_osd_client *osdc = osd->o_osdc;
1352 spin_lock(&osdc->osd_lru_lock);
1355 spin_unlock(&osdc->osd_lru_lock);
1364 struct ceph_osd_client *osdc = osd->o_osdc;
1367 verify_osdc_wrlocked(osdc);
1380 link_request(&osdc->homeless_osd, req);
1391 link_linger(&osdc->homeless_osd, lreq);
1396 erase_osd(&osdc->osds, osd);
1439 static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
1445 verify_osdc_wrlocked(osdc);
1447 verify_osdc_locked(osdc);
1450 osd = lookup_osd(&osdc->osds, o);
1452 osd = &osdc->homeless_osd;
1457 osd = create_osd(osdc, o);
1458 insert_osd(&osdc->osds, osd);
1460 &osdc->osdmap->osd_addr[osd->o_osd]);
1463 dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
1515 static bool have_pool_full(struct ceph_osd_client *osdc)
1519 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
1530 static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
1534 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
1545 static bool target_should_be_paused(struct ceph_osd_client *osdc,
1549 bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
1550 bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
1551 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1557 (osdc->osdmap->epoch < osdc->epoch_barrier);
1574 static int pick_closest_replica(struct ceph_osd_client *osdc,
1577 struct ceph_options *opt = osdc->client->options;
1582 locality = ceph_get_crush_locality(osdc->osdmap,
1605 static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1618 bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1619 bool recovery_deletes = ceph_osdmap_flag(osdc,
1623 t->epoch = osdc->osdmap->epoch;
1624 pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
1631 if (osdc->osdmap->epoch == pi->last_force_request_resend) {
1649 pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
1661 ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
1680 if (t->paused && !target_should_be_paused(osdc, t, pi)) {
1692 ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
1712 pos = pick_closest_replica(osdc, &acting);
2374 static void maybe_request_map(struct ceph_osd_client *osdc)
2378 verify_osdc_locked(osdc);
2379 WARN_ON(!osdc->osdmap->epoch);
2381 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2382 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
2383 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2384 dout("%s osdc %p continuous\n", __func__, osdc);
2387 dout("%s osdc %p onetime\n", __func__, osdc);
2390 if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
2391 osdc->osdmap->epoch + 1, continuous))
2392 ceph_monc_renew_subs(&osdc->client->monc);
2400 struct ceph_osd_client *osdc = req->r_osdc;
2411 ct_res = calc_target(osdc, &req->r_t, false);
2415 osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
2421 if (osdc->abort_err) {
2422 dout("req %p abort_err %d\n", req, osdc->abort_err);
2423 err = osdc->abort_err;
2424 } else if (osdc->osdmap->epoch < osdc->epoch_barrier) {
2425 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
2426 osdc->epoch_barrier);
2428 maybe_request_map(osdc);
2430 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2433 maybe_request_map(osdc);
2435 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2438 maybe_request_map(osdc);
2442 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2443 pool_full(osdc, req->r_t.base_oloc.pool))) {
2445 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
2448 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL))
2454 maybe_request_map(osdc);
2459 maybe_request_map(osdc);
2468 req->r_tid = atomic64_inc_return(&osdc->last_tid);
2480 downgrade_write(&osdc->lock);
2484 up_read(&osdc->lock);
2485 down_write(&osdc->lock);
2512 struct ceph_osd_client *osdc = req->r_osdc;
2514 WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
2523 atomic_dec(&osdc->num_requests);
2570 struct ceph_osd_client *osdc = req->r_osdc;
2573 verify_osdc_wrlocked(osdc);
2575 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2580 erase_request_mc(&osdc->map_checks, req);
2614 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
2616 dout("%s osdc %p err %d\n", __func__, osdc, err);
2617 down_write(&osdc->lock);
2618 for_each_request(osdc, abort_fn, &err);
2619 osdc->abort_err = err;
2620 up_write(&osdc->lock);
2624 void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc)
2626 down_write(&osdc->lock);
2627 osdc->abort_err = 0;
2628 up_write(&osdc->lock);
2632 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2634 if (likely(eb > osdc->epoch_barrier)) {
2636 osdc->epoch_barrier, eb);
2637 osdc->epoch_barrier = eb;
2639 if (eb > osdc->osdmap->epoch)
2640 maybe_request_map(osdc);
2644 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2646 down_read(&osdc->lock);
2647 if (unlikely(eb > osdc->epoch_barrier)) {
2648 up_read(&osdc->lock);
2649 down_write(&osdc->lock);
2650 update_epoch_barrier(osdc, eb);
2651 up_write(&osdc->lock);
2653 up_read(&osdc->lock);
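
A minimal sketch of the check-then-upgrade locking idiom visible in ceph_osdc_update_epoch_barrier() above, assuming only that osdc->lock is the rw_semaphore and epoch_barrier the u32 field shown in the listing. The my_* names are hypothetical, and the real write-locked helper also requests a newer osdmap when the barrier moves past the current epoch, which this sketch omits.

#include <linux/rwsem.h>
#include <linux/ceph/osd_client.h>

/* caller must hold osdc->lock for write; re-check because the barrier
 * may have been raised while the read lock was dropped */
static void my_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
	if (eb > osdc->epoch_barrier)
		osdc->epoch_barrier = eb;
}

void my_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
	down_read(&osdc->lock);
	if (unlikely(eb > osdc->epoch_barrier)) {
		/* rw_semaphores cannot be upgraded in place:
		 * drop the read lock, take the write lock, re-check */
		up_read(&osdc->lock);
		down_write(&osdc->lock);
		my_update_epoch_barrier(osdc, eb);
		up_write(&osdc->lock);
	} else {
		up_read(&osdc->lock);
	}
}
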
2666 struct ceph_osd_client *osdc = req->r_osdc;
2670 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2671 pool_full(osdc, req->r_t.base_oloc.pool))) {
2673 update_epoch_barrier(osdc, osdc->osdmap->epoch);
2685 * osdc->epoch_barrier to the latest map epoch that we've seen if any were
2688 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2692 if (ceph_test_opt(osdc->client, ABORT_ON_FULL) &&
2693 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
2694 for_each_request(osdc, abort_on_full_fn, &victims);
2699 struct ceph_osd_client *osdc = req->r_osdc;
2700 struct ceph_osdmap *map = osdc->osdmap;
2702 verify_osdc_wrlocked(osdc);
2733 struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2739 down_write(&osdc->lock);
2740 req = lookup_request_mc(&osdc->map_checks, tid);
2750 erase_request_mc(&osdc->map_checks, req);
2755 up_write(&osdc->lock);
2760 struct ceph_osd_client *osdc = req->r_osdc;
2764 verify_osdc_wrlocked(osdc);
2766 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2773 insert_request_mc(&osdc->map_checks, req);
2774 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2821 linger_alloc(struct ceph_osd_client *osdc)
2839 lreq->osdc = osdc;
2893 verify_osdc_locked(lreq->osdc);
2900 struct ceph_osd_client *osdc = lreq->osdc;
2903 down_read(&osdc->lock);
2905 up_read(&osdc->lock);
2912 struct ceph_osd_client *osdc = lreq->osdc;
2914 verify_osdc_wrlocked(osdc);
2918 lreq->linger_id = ++osdc->last_linger_id;
2919 insert_linger_osdc(&osdc->linger_requests, lreq);
2924 struct ceph_osd_client *osdc = lreq->osdc;
2926 verify_osdc_wrlocked(osdc);
2928 erase_linger_osdc(&osdc->linger_requests, lreq);
2993 struct ceph_osd_client *osdc = lreq->osdc;
3000 queue_work(osdc->notify_wq, &lwork->work);
3145 struct ceph_osd_client *osdc = lreq->osdc;
3149 verify_osdc_wrlocked(osdc);
3159 req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO);
3233 struct ceph_osd_client *osdc = lreq->osdc;
3237 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
3253 req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO);
3270 req->r_tid = atomic64_inc_return(&osdc->last_tid);
3277 struct ceph_osd_client *osdc = lreq->osdc;
3280 down_write(&osdc->lock);
3283 calc_target(osdc, &lreq->t, false);
3284 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3288 up_write(&osdc->lock);
3293 struct ceph_osd_client *osdc = lreq->osdc;
3296 verify_osdc_wrlocked(osdc);
3298 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3304 erase_linger_mc(&osdc->linger_map_checks, lreq);
3324 struct ceph_osd_client *osdc = lreq->osdc;
3326 down_write(&osdc->lock);
3329 up_write(&osdc->lock);
3336 struct ceph_osd_client *osdc = lreq->osdc;
3337 struct ceph_osdmap *map = osdc->osdmap;
3339 verify_osdc_wrlocked(osdc);
3367 struct ceph_osd_client *osdc = &greq->monc->client->osdc;
3373 down_write(&osdc->lock);
3374 lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
3385 erase_linger_mc(&osdc->linger_map_checks, lreq);
3390 up_write(&osdc->lock);
3395 struct ceph_osd_client *osdc = lreq->osdc;
3399 verify_osdc_wrlocked(osdc);
3401 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3409 insert_linger_mc(&osdc->linger_map_checks, lreq);
3410 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
3448 struct ceph_osd_client *osdc =
3450 struct ceph_options *opts = osdc->client->options;
3456 dout("%s osdc %p\n", __func__, osdc);
3457 down_write(&osdc->lock);
3464 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3505 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3513 req->r_tid, osdc->homeless_osd.o_osd);
3519 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3520 maybe_request_map(osdc);
3530 up_write(&osdc->lock);
3531 schedule_delayed_work(&osdc->timeout_work,
3532 osdc->client->options->osd_keepalive_timeout);
3537 struct ceph_osd_client *osdc =
3540 unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
3543 dout("%s osdc %p\n", __func__, osdc);
3544 down_write(&osdc->lock);
3545 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
3554 up_write(&osdc->lock);
3555 schedule_delayed_work(&osdc->osds_timeout_work,
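
A hedged sketch of the self-rearming delayed-work pattern that handle_timeout() and handle_osds_timeout() above appear to follow: the handler takes osdc->lock, performs its periodic scan, then re-queues itself using the interval from the client options. The function name and the elided scan body are hypothetical; only the locking, the container_of() recovery of osdc, and the schedule_delayed_work() call mirror lines in the listing.

#include <linux/workqueue.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>

static void my_osdc_timeout_fn(struct work_struct *work)
{
	struct ceph_osd_client *osdc =
		container_of(work, struct ceph_osd_client, timeout_work.work);

	down_write(&osdc->lock);
	/* ... walk osdc->osds, ping slow OSDs, handle homeless requests ... */
	up_write(&osdc->lock);

	/* re-arm for the next keepalive period */
	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
}
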
3768 struct ceph_osd_client *osdc = osd->o_osdc;
3778 down_read(&osdc->lock);
3885 up_read(&osdc->lock);
3895 up_read(&osdc->lock);
3898 static void set_pool_was_full(struct ceph_osd_client *osdc)
3902 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3910 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3914 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3924 struct ceph_osd_client *osdc = lreq->osdc;
3927 ct_res = calc_target(osdc, &lreq->t, true);
3931 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3951 struct ceph_osd_client *osdc = osd->o_osdc;
3969 pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3999 ct_res = calc_target(osdc, &req->r_t, false);
4004 pool_cleared_full(osdc, req->r_t.base_oloc.pool));
4023 static int handle_one_map(struct ceph_osd_client *osdc,
4033 was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
4034 set_pool_was_full(osdc);
4038 ceph_msgr2(osdc->client),
4039 osdc->osdmap);
4041 newmap = ceph_osdmap_decode(&p, end, ceph_msgr2(osdc->client));
4045 if (newmap != osdc->osdmap) {
4056 old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
4063 if (osdc->osdmap->epoch &&
4064 osdc->osdmap->epoch + 1 < newmap->epoch) {
4069 ceph_osdmap_destroy(osdc->osdmap);
4070 osdc->osdmap = newmap;
4073 was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
4074 scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
4077 for (n = rb_first(&osdc->osds); n; ) {
4084 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
4086 ceph_osd_addr(osdc->osdmap, osd->o_osd),
4094 static void kick_requests(struct ceph_osd_client *osdc,
4109 if (req->r_t.epoch < osdc->osdmap->epoch) {
4110 ct_res = calc_target(osdc, &req->r_t, false);
4126 osd = lookup_create_osd(osdc, req->r_t.osd, true);
4151 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
4165 dout("%s have %u\n", __func__, osdc->osdmap->epoch);
4166 down_write(&osdc->lock);
4171 if (ceph_check_fsid(osdc->client, &fsid) < 0)
4174 was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
4175 was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
4176 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
4177 have_pool_full(osdc);
4187 if (osdc->osdmap->epoch &&
4188 osdc->osdmap->epoch + 1 == epoch) {
4191 err = handle_one_map(osdc, p, p + maplen, true,
4217 } else if (osdc->osdmap->epoch >= epoch) {
4220 osdc->osdmap->epoch);
4223 err = handle_one_map(osdc, p, p + maplen, false,
4238 pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
4239 pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
4240 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
4241 have_pool_full(osdc);
4243 osdc->osdmap->epoch < osdc->epoch_barrier)
4244 maybe_request_map(osdc);
4246 kick_requests(osdc, &need_resend, &need_resend_linger);
4248 ceph_osdc_abort_on_full(osdc);
4249 ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
4250 osdc->osdmap->epoch);
4251 up_write(&osdc->lock);
4252 wake_up_all(&osdc->client->auth_wq);
4256 pr_err("osdc handle_map corrupt msg\n");
4258 up_write(&osdc->lock);
4297 struct ceph_osd_client *osdc = osd->o_osdc;
4301 down_write(&osdc->lock);
4309 maybe_request_map(osdc);
4312 up_write(&osdc->lock);
4531 struct ceph_osd_client *osdc = osd->o_osdc;
4535 down_read(&osdc->lock);
4538 up_read(&osdc->lock);
4567 up_read(&osdc->lock);
4573 static void handle_watch_notify(struct ceph_osd_client *osdc,
4606 down_read(&osdc->lock);
4607 lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4662 up_read(&osdc->lock);
4666 pr_err("osdc handle_watch_notify corrupt msg\n");
4672 void ceph_osdc_start_request(struct ceph_osd_client *osdc,
4675 down_read(&osdc->lock);
4677 up_read(&osdc->lock);
4692 struct ceph_osd_client *osdc = req->r_osdc;
4694 down_write(&osdc->lock);
4697 up_write(&osdc->lock);
4725 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
4735 void ceph_osdc_sync(struct ceph_osd_client *osdc)
4738 u64 last_tid = atomic64_read(&osdc->last_tid);
4741 down_read(&osdc->lock);
4742 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
4758 up_read(&osdc->lock);
4769 up_read(&osdc->lock);
4778 ceph_osdc_watch(struct ceph_osd_client *osdc,
4788 lreq = linger_alloc(osdc);
4825 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
4828 struct ceph_options *opts = osdc->client->options;
4832 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4847 ceph_osdc_start_request(osdc, req);
4890 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
4901 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4918 ceph_osdc_start_request(osdc, req);
4919 ret = ceph_osdc_wait_request(osdc, req);
4935 int ceph_osdc_notify(struct ceph_osd_client *osdc,
4953 lreq = linger_alloc(osdc);
5007 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
5013 down_read(&osdc->lock);
5032 up_read(&osdc->lock);
5102 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
5112 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
5135 ceph_osdc_start_request(osdc, req);
5136 ret = ceph_osdc_wait_request(osdc, req);
5154 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
5156 dout("%s osdc %p\n", __func__, osdc);
5157 flush_workqueue(osdc->notify_wq);
5161 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
5163 down_read(&osdc->lock);
5164 maybe_request_map(osdc);
5165 up_read(&osdc->lock);
5175 int ceph_osdc_call(struct ceph_osd_client *osdc,
5189 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
5212 ceph_osdc_start_request(osdc, req);
5213 ret = ceph_osdc_wait_request(osdc, req);
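
The ceph_osdc_call(), ceph_osdc_notify_ack() and ceph_osdc_list_watchers() lines above all share the same request lifecycle; below is a hedged sketch of that flow for a single write-class CEPH_OSD_OP_CREATE op. Only ceph_osdc_alloc_request(), ceph_osdc_start_request() and ceph_osdc_wait_request() appear in the listing; ceph_oid_copy(), ceph_oloc_copy(), osd_req_op_init(), ceph_osdc_alloc_messages() and ceph_osdc_put_request() are used from memory and their exact signatures vary across kernel versions, so treat this as a sketch under those assumptions rather than this file's implementation.

#include <linux/gfp.h>
#include <linux/timekeeping.h>
#include <linux/ceph/osd_client.h>

/* hypothetical helper: create an object (no payload in either direction) */
static int my_osdc_create_object(struct ceph_osd_client *osdc,
				 struct ceph_object_id *oid,
				 struct ceph_object_locator *oloc)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);	/* write requests carry an mtime */

	osd_req_op_init(req, 0, CEPH_OSD_OP_CREATE, 0);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put;

	ceph_osdc_start_request(osdc, req);	/* map to a target and queue it */
	ret = ceph_osdc_wait_request(osdc, req);	/* block for the reply */

out_put:
	ceph_osdc_put_request(req);
	return ret;
}
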
5229 void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
5233 down_write(&osdc->lock);
5234 for (n = rb_first(&osdc->osds); n; ) {
5241 up_write(&osdc->lock);
5247 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
5252 osdc->client = client;
5253 init_rwsem(&osdc->lock);
5254 osdc->osds = RB_ROOT;
5255 INIT_LIST_HEAD(&osdc->osd_lru);
5256 spin_lock_init(&osdc->osd_lru_lock);
5257 osd_init(&osdc->homeless_osd);
5258 osdc->homeless_osd.o_osdc = osdc;
5259 osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
5260 osdc->last_linger_id = CEPH_LINGER_ID_START;
5261 osdc->linger_requests = RB_ROOT;
5262 osdc->map_checks = RB_ROOT;
5263 osdc->linger_map_checks = RB_ROOT;
5264 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
5265 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
5268 osdc->osdmap = ceph_osdmap_alloc();
5269 if (!osdc->osdmap)
5272 osdc->req_mempool = mempool_create_slab_pool(10,
5274 if (!osdc->req_mempool)
5277 err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
5281 err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
5288 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
5289 if (!osdc->notify_wq)
5292 osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
5293 if (!osdc->completion_wq)
5296 schedule_delayed_work(&osdc->timeout_work,
5297 osdc->client->options->osd_keepalive_timeout);
5298 schedule_delayed_work(&osdc->osds_timeout_work,
5299 round_jiffies_relative(osdc->client->options->osd_idle_ttl));
5304 destroy_workqueue(osdc->notify_wq);
5306 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
5308 ceph_msgpool_destroy(&osdc->msgpool_op);
5310 mempool_destroy(osdc->req_mempool);
5312 ceph_osdmap_destroy(osdc->osdmap);
5317 void ceph_osdc_stop(struct ceph_osd_client *osdc)
5319 destroy_workqueue(osdc->completion_wq);
5320 destroy_workqueue(osdc->notify_wq);
5321 cancel_delayed_work_sync(&osdc->timeout_work);
5322 cancel_delayed_work_sync(&osdc->osds_timeout_work);
5324 down_write(&osdc->lock);
5325 while (!RB_EMPTY_ROOT(&osdc->osds)) {
5326 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
5330 up_write(&osdc->lock);
5331 WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
5332 osd_cleanup(&osdc->homeless_osd);
5334 WARN_ON(!list_empty(&osdc->osd_lru));
5335 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
5336 WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
5337 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
5338 WARN_ON(atomic_read(&osdc->num_requests));
5339 WARN_ON(atomic_read(&osdc->num_homeless));
5341 ceph_osdmap_destroy(osdc->osdmap);
5342 mempool_destroy(osdc->req_mempool);
5343 ceph_msgpool_destroy(&osdc->msgpool_op);
5344 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
5410 struct ceph_osd_client *osdc = osd->o_osdc;
5415 ceph_osdc_handle_map(osdc, msg);
5424 handle_watch_notify(osdc, msg);
5463 struct ceph_osd_client *osdc = osd->o_osdc;
5471 down_read(&osdc->lock);
5520 up_read(&osdc->lock);
5603 struct ceph_osd_client *osdc = o->o_osdc;
5604 struct ceph_auth_client *ac = osdc->client->monc.auth;
5620 struct ceph_osd_client *osdc = o->o_osdc;
5621 struct ceph_auth_client *ac = osdc->client->monc.auth;
5630 struct ceph_osd_client *osdc = o->o_osdc;
5631 struct ceph_auth_client *ac = osdc->client->monc.auth;
5642 struct ceph_osd_client *osdc = o->o_osdc;
5643 struct ceph_auth_client *ac = osdc->client->monc.auth;
5646 return ceph_monc_validate_auth(&osdc->client->monc);