Lines matching defs:kc — references to the struct dm_kcopyd_client *kc handle in drivers/md/dm-kcopyd.c, the device-mapper kcopyd copy service. The leading number on each line is the line number within dm-kcopyd.c.

210 static void wake(struct dm_kcopyd_client *kc)
212 queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
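
wake() is the only wake-up primitive in the file: it queues kc->kcopyd_work on the client's kcopyd_wq workqueue, so every state change below funnels into a do_work() pass on that queue.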
245 static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
252 if (kc->nr_free_pages >= kc->nr_reserved_pages)
255 pl->next = kc->pages;
256 kc->pages = pl;
257 kc->nr_free_pages++;
264 static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
275 pl = kc->pages;
278 kc->pages = pl->next;
279 kc->nr_free_pages--;
289 kcopyd_put_pages(kc, *pages);
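
kcopyd_put_pages() and kcopyd_get_pages() manage the client's page reserve as a singly linked free list of struct page_list (from <linux/dm-io.h>) headed at kc->pages: put returns pages to the list until nr_free_pages reaches nr_reserved_pages and frees any surplus; get tries a fresh opportunistic allocation first, falls back to the reserve, and on exhaustion puts back what it already took (line 289) and fails with -ENOMEM. A minimal sketch of the underlying list pattern, with hypothetical pl_push()/pl_pop() helpers:

    struct page_list {                      /* as in <linux/dm-io.h> */
            struct page_list *next;
            struct page *page;
    };

    /* O(1) push onto the head of the free list */
    static void pl_push(struct page_list **head, struct page_list *pl)
    {
            pl->next = *head;
            *head = pl;
    }

    /* O(1) pop from the head; NULL when the reserve is exhausted */
    static struct page_list *pl_pop(struct page_list **head)
    {
            struct page_list *pl = *head;

            if (pl)
                    *head = pl->next;
            return pl;
    }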
310 static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned int nr_pages)
326 kc->nr_reserved_pages += nr_pages;
327 kcopyd_put_pages(kc, pl);
332 static void client_free_pages(struct dm_kcopyd_client *kc)
334 BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages);
335 drop_pages(kc->pages);
336 kc->pages = NULL;
337 kc->nr_free_pages = kc->nr_reserved_pages = 0;
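
client_reserve_pages() fills the reserve once, at client creation: it allocates nr_pages entries, bumps kc->nr_reserved_pages, and feeds them in through kcopyd_put_pages(). client_free_pages() is the inverse and BUG()s unless every reserved page is back on the free list, i.e. it may only run once no jobs remain.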
348 struct dm_kcopyd_client *kc;
418 struct dm_kcopyd_client *kc)
444 struct dm_kcopyd_client *kc)
448 spin_lock_irq(&kc->job_lock);
451 if (jobs == &kc->io_jobs)
452 job = pop_io_job(jobs, kc);
458 spin_unlock_irq(&kc->job_lock);
466 struct dm_kcopyd_client *kc = job->kc;
468 spin_lock_irqsave(&kc->job_lock, flags);
470 spin_unlock_irqrestore(&kc->job_lock, flags);
476 struct dm_kcopyd_client *kc = job->kc;
478 spin_lock_irq(&kc->job_lock);
480 spin_unlock_irq(&kc->job_lock);
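
All four job lists hang off the client and are serialized by the single kc->job_lock; each kcopyd_job carries a kc back-pointer (line 348) so the queue helpers can find the lock. push() uses spin_lock_irqsave() because it is also called from I/O completion context, while pop() and push_head() run from the workqueue and use the plain irq-disabling form. pop_io_job() is the one special case: it pops any read and any unconstrained write, but holds back a DM_KCOPYD_WRITE_SEQ write until it is next in sequence.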
498 struct dm_kcopyd_client *kc = job->kc;
501 kcopyd_put_pages(kc, job->pages);
508 mempool_free(job, &kc->job_pool);
512 if (atomic_dec_and_test(&kc->nr_jobs))
513 wake_up(&kc->destroyq);
523 struct dm_kcopyd_client *kc = job->kc;
525 io_job_finish(kc->throttle);
534 push(&kc->complete_jobs, job);
535 wake(kc);
541 push(&kc->complete_jobs, job);
545 push(&kc->io_jobs, job);
548 wake(kc);
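
complete_io() is the dm-io completion callback. It releases the throttle via io_job_finish(), then routes the job: on error (unless the job ignores errors) or after the write phase it goes to complete_jobs; after a successful read the job is switched to a write and pushed back onto io_jobs. Either way it ends with wake(kc) so do_work() picks the job up again.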
565 .client = job->kc->io_client,
578 io_job_start(job->kc->throttle);
593 r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
596 push(&job->kc->io_jobs, job);
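
run_io_job() issues the actual I/O through job->kc->io_client after charging the throttle (io_job_start); run_pages_job() calls kcopyd_get_pages() and, on success, advances the job to io_jobs. An -ENOMEM from the page pool is non-fatal: process_jobs() pushes the job back onto pages_jobs so it is retried once completing jobs return pages.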
611 static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
617 while ((job = pop(jobs, kc))) {
627 push(&kc->complete_jobs, job);
628 wake(kc);
652 struct dm_kcopyd_client *kc = container_of(work,
663 spin_lock_irq(&kc->job_lock);
664 list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
665 spin_unlock_irq(&kc->job_lock);
668 process_jobs(&kc->complete_jobs, kc, run_complete_job);
669 process_jobs(&kc->pages_jobs, kc, run_pages_job);
670 process_jobs(&kc->io_jobs, kc, run_io_job);
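
do_work() is the workqueue handler (recovered from the work pointer with container_of). It first splices callback_jobs into complete_jobs under the lock, then runs the lists in a deliberate order: complete_jobs first, because completed jobs free pages that pages_jobs are waiting for; pages_jobs next, since successful ones jump onto io_jobs; io_jobs last.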
681 struct dm_kcopyd_client *kc = job->kc;
683 atomic_inc(&kc->nr_jobs);
685 push(&kc->callback_jobs, job);
687 push(&kc->io_jobs, job);
689 push(&kc->pages_jobs, job);
690 wake(kc);
701 struct dm_kcopyd_client *kc = job->kc;
721 if (count > kc->sub_job_size)
722 count = kc->sub_job_size;
757 push(&kc->complete_jobs, job);
758 wake(kc);
769 atomic_inc(&master_job->kc->nr_jobs);
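
dispatch_job() accounts the job in kc->nr_jobs and routes it: a zero-length job goes straight to callback_jobs, a zeroing job (already attached to the shared zero page list) to io_jobs, and an ordinary copy to pages_jobs to pick up buffer pages first. Copies larger than kc->sub_job_size are instead split by split_job(): it seeds sub-jobs whose notify fn is segment_complete(), each clamped to sub_job_size sectors (lines 721-722), and the master job only reaches complete_jobs when the last sub-job finishes.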
778 void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
789 job = mempool_alloc(&kc->job_pool, GFP_NOIO);
795 job->kc = kc;
849 if (job->source.count <= kc->sub_job_size)
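
dm_kcopyd_copy() is the exported entry point: one source region, up to num_dests destinations, and fn(read_err, write_err, context) fired from kcopyd context once everything finishes (write_err is a bitset with one bit per destination). A minimal usage sketch; copy_example() and my_copy_done() are hypothetical names and error handling is trimmed:

    #include <linux/dm-kcopyd.h>
    #include <linux/dm-io.h>
    #include <linux/completion.h>

    /* Hypothetical notify fn: signal a waiter when the copy is done. */
    static void my_copy_done(int read_err, unsigned long write_err,
                             void *context)
    {
            struct completion *done = context;

            if (read_err || write_err)
                    pr_err("kcopyd: copy failed\n");
            complete(done);
    }

    /* Copy 'count' sectors from src_bdev to dst_bdev and wait. */
    static void copy_example(struct dm_kcopyd_client *kc,
                             struct block_device *src_bdev, sector_t src,
                             struct block_device *dst_bdev, sector_t dst,
                             sector_t count)
    {
            DECLARE_COMPLETION_ONSTACK(done);
            struct dm_io_region from = {
                    .bdev = src_bdev, .sector = src, .count = count,
            };
            struct dm_io_region to = {
                    .bdev = dst_bdev, .sector = dst, .count = count,
            };

            dm_kcopyd_copy(kc, &from, 1, &to, 0, my_copy_done, &done);
            wait_for_completion(&done);
    }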
858 void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
862 dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
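
dm_kcopyd_zero() is, as the body shows, dm_kcopyd_copy() with from == NULL; the NULL source makes the job use the shared zero page list, so the destinations are filled with zeroes under the same flags/callback convention.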
866 void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
871 job = mempool_alloc(&kc->job_pool, GFP_NOIO);
874 job->kc = kc;
879 atomic_inc(&kc->nr_jobs);
888 struct dm_kcopyd_client *kc = job->kc;
893 push(&kc->callback_jobs, job);
894 wake(kc);
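
dm_kcopyd_prepare_callback() and dm_kcopyd_do_callback() split job allocation from job firing: the GFP_NOIO mempool_alloc() happens up front where sleeping is allowed, and the later do_callback() just records the error codes, pushes the job onto callback_jobs and wakes the worker, so fn runs from kcopyd context. A minimal sketch, reusing the hypothetical my_copy_done() from above:

    /* Earlier, in process context: */
    void *cb = dm_kcopyd_prepare_callback(kc, my_copy_done, &done);

    /* Later, e.g. from atomic/completion context: queue the job so
     * my_copy_done(0, 0, &done) runs from the kcopyd workqueue. */
    dm_kcopyd_do_callback(cb, 0 /* read_err */, 0 /* write_err */);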
919 struct dm_kcopyd_client *kc;
921 kc = kzalloc(sizeof(*kc), GFP_KERNEL);
922 if (!kc)
925 spin_lock_init(&kc->job_lock);
926 INIT_LIST_HEAD(&kc->callback_jobs);
927 INIT_LIST_HEAD(&kc->complete_jobs);
928 INIT_LIST_HEAD(&kc->io_jobs);
929 INIT_LIST_HEAD(&kc->pages_jobs);
930 kc->throttle = throttle;
932 r = mempool_init_slab_pool(&kc->job_pool, MIN_JOBS, _job_cache);
936 INIT_WORK(&kc->kcopyd_work, do_work);
937 kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0);
938 if (!kc->kcopyd_wq) {
943 kc->sub_job_size = dm_get_kcopyd_subjob_size();
944 reserve_pages = DIV_ROUND_UP(kc->sub_job_size << SECTOR_SHIFT, PAGE_SIZE);
946 kc->pages = NULL;
947 kc->nr_reserved_pages = kc->nr_free_pages = 0;
948 r = client_reserve_pages(kc, reserve_pages);
952 kc->io_client = dm_io_client_create();
953 if (IS_ERR(kc->io_client)) {
954 r = PTR_ERR(kc->io_client);
958 init_waitqueue_head(&kc->destroyq);
959 atomic_set(&kc->nr_jobs, 0);
961 return kc;
964 client_free_pages(kc);
966 destroy_workqueue(kc->kcopyd_wq);
968 mempool_exit(&kc->job_pool);
970 kfree(kc);
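
dm_kcopyd_client_create() builds the client in dependency order (lock and job lists, job mempool, workqueue, page reserve sized from sub_job_size, dm-io client), and the error path (lines 964-970) unwinds in reverse. Note it returns ERR_PTR() rather than NULL on failure. Typical caller pattern (a NULL throttle disables throttling):

    struct dm_kcopyd_client *kc;

    kc = dm_kcopyd_client_create(NULL);     /* or &some_throttle */
    if (IS_ERR(kc))
            return PTR_ERR(kc);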
976 void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
979 wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
981 BUG_ON(!list_empty(&kc->callback_jobs));
982 BUG_ON(!list_empty(&kc->complete_jobs));
983 BUG_ON(!list_empty(&kc->io_jobs));
984 BUG_ON(!list_empty(&kc->pages_jobs));
985 destroy_workqueue(kc->kcopyd_wq);
986 dm_io_client_destroy(kc->io_client);
987 client_free_pages(kc);
988 mempool_exit(&kc->job_pool);
989 kfree(kc);
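
Teardown mirrors creation: dm_kcopyd_client_destroy() blocks on destroyq until nr_jobs drains to zero, asserts via BUG_ON() that all four job lists are empty, then tears down the workqueue, dm-io client, page reserve and job mempool. The caller must stop submitting jobs before calling it.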
993 void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc)
995 flush_workqueue(kc->kcopyd_wq);
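
dm_kcopyd_client_flush() is a thin wrapper over flush_workqueue(): it waits for the pending do_work() pass to finish, but not for I/O still in flight; waiting for that is what the destroyq/nr_jobs pair in destroy is for.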