/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */

#include <linux/types.h>
#include <asm/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"

/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
        spinlock_t lock;
        struct page_list *pages;
        unsigned int nr_pages;
        unsigned int nr_free_pages;

        struct dm_io_client *io_client;

        wait_queue_head_t destroyq;
        atomic_t nr_jobs;

        mempool_t *job_pool;

        struct workqueue_struct *kcopyd_wq;
        struct work_struct kcopyd_work;

/*
 * We maintain three lists of jobs:
 *
 * i)   jobs waiting for pages
 * ii)  jobs that have pages, and are waiting for the io to be issued.
 * iii) jobs that have completed.
 *
 * All three of these are protected by job_lock.
 */
        spinlock_t job_lock;
        struct list_head complete_jobs;
        struct list_head io_jobs;
        struct list_head pages_jobs;
};

static void wake(struct dm_kcopyd_client *kc)
{
        queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}

static struct page_list *alloc_pl(void)
{
        struct page_list *pl;

        pl = kmalloc(sizeof(*pl), GFP_KERNEL);
        if (!pl)
                return NULL;

        pl->page = alloc_page(GFP_KERNEL);
        if (!pl->page) {
                kfree(pl);
                return NULL;
        }

        return pl;
}

static void free_pl(struct page_list *pl)
{
        __free_page(pl->page);
        kfree(pl);
}

static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
                            unsigned int nr, struct page_list **pages)
{
        struct page_list *pl;

        spin_lock(&kc->lock);
        if (kc->nr_free_pages < nr) {
                spin_unlock(&kc->lock);
                return -ENOMEM;
        }

        kc->nr_free_pages -= nr;
        for (*pages = pl = kc->pages; --nr; pl = pl->next)
                ;

        kc->pages = pl->next;
        pl->next = NULL;

        spin_unlock(&kc->lock);

        return 0;
}

static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
        struct page_list *cursor;

        spin_lock(&kc->lock);
        for (cursor = pl; cursor->next; cursor = cursor->next)
                kc->nr_free_pages++;

        kc->nr_free_pages++;
        cursor->next = kc->pages;
        kc->pages = pl;
        spin_unlock(&kc->lock);
}
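/*
 * Illustrative sketch (comment only, not compiled): how the two pool
 * helpers above pair up.  A caller that needs sixteen pages reserves
 * them, performs its io and then returns the whole chain:
 *
 *      struct page_list *pl;
 *
 *      if (!kcopyd_get_pages(kc, 16, &pl)) {
 *              ... issue io using the sixteen chained pages ...
 *              kcopyd_put_pages(kc, pl);
 *      }
 *
 * kcopyd_get_pages() returns -ENOMEM rather than blocking when the
 * pool is short, so callers must be prepared to retry later.
 */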
/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
        struct page_list *next;

        while (pl) {
                next = pl->next;
                free_pl(pl);
                pl = next;
        }
}

static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
{
        unsigned int i;
        struct page_list *pl = NULL, *next;

        for (i = 0; i < nr; i++) {
                next = alloc_pl();
                if (!next) {
                        if (pl)
                                drop_pages(pl);
                        return -ENOMEM;
                }
                next->next = pl;
                pl = next;
        }

        kcopyd_put_pages(kc, pl);
        kc->nr_pages += nr;
        return 0;
}

static void client_free_pages(struct dm_kcopyd_client *kc)
{
        BUG_ON(kc->nr_free_pages != kc->nr_pages);
        drop_pages(kc->pages);
        kc->pages = NULL;
        kc->nr_free_pages = kc->nr_pages = 0;
}

/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
        struct dm_kcopyd_client *kc;
        struct list_head list;
        unsigned long flags;

        /*
         * Error state of the job.
         */
        int read_err;
        unsigned long write_err;

        /*
         * Either READ or WRITE
         */
        int rw;
        struct dm_io_region source;

        /*
         * The destinations for the transfer.
         */
        unsigned int num_dests;
        struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

        sector_t offset;
        unsigned int nr_pages;
        struct page_list *pages;

        /*
         * Set this to ensure you are notified when the job has
         * completed.  'context' is for the callback to use.
         */
        dm_kcopyd_notify_fn fn;
        void *context;

        /*
         * These fields are only used if the job has been split
         * into more manageable parts.
         */
        struct mutex lock;
        atomic_t sub_jobs;
        sector_t progress;
};

#define MIN_JOBS 512

static struct kmem_cache *_job_cache;

int __init dm_kcopyd_init(void)
{
        _job_cache = KMEM_CACHE(kcopyd_job, 0);
        if (!_job_cache)
                return -ENOMEM;

        return 0;
}

void dm_kcopyd_exit(void)
{
        kmem_cache_destroy(_job_cache);
        _job_cache = NULL;
}

/*
 * Functions to push a job onto a given job list (push() at the tail,
 * push_head() at the head) and to pop a job off the head of it.
 */
static struct kcopyd_job *pop(struct list_head *jobs,
                              struct dm_kcopyd_client *kc)
{
        struct kcopyd_job *job = NULL;
        unsigned long flags;

        spin_lock_irqsave(&kc->job_lock, flags);

        if (!list_empty(jobs)) {
                job = list_entry(jobs->next, struct kcopyd_job, list);
                list_del(&job->list);
        }
        spin_unlock_irqrestore(&kc->job_lock, flags);

        return job;
}

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
        unsigned long flags;
        struct dm_kcopyd_client *kc = job->kc;

        spin_lock_irqsave(&kc->job_lock, flags);
        list_add_tail(&job->list, jobs);
        spin_unlock_irqrestore(&kc->job_lock, flags);
}

static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
        unsigned long flags;
        struct dm_kcopyd_client *kc = job->kc;

        spin_lock_irqsave(&kc->job_lock, flags);
        list_add(&job->list, jobs);
        spin_unlock_irqrestore(&kc->job_lock, flags);
}
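/*
 * A sketch of how a job migrates between the three lists (see the
 * list definitions in struct dm_kcopyd_client above):
 *
 *      push(&kc->pages_jobs, job);     waiting for pages
 *      push(&kc->io_jobs, job);        pages held, io pending
 *      push(&kc->complete_jobs, job);  io done, callback pending
 *
 * pop() takes a job from the head of a list; push_head() requeues a
 * job that could not be processed yet so it keeps its place at the
 * front.
 */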
/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
        void *context = job->context;
        int read_err = job->read_err;
        unsigned long write_err = job->write_err;
        dm_kcopyd_notify_fn fn = job->fn;
        struct dm_kcopyd_client *kc = job->kc;

        if (job->pages)
                kcopyd_put_pages(kc, job->pages);
        mempool_free(job, kc->job_pool);
        fn(read_err, write_err, context);

        if (atomic_dec_and_test(&kc->nr_jobs))
                wake_up(&kc->destroyq);

        return 0;
}

static void complete_io(unsigned long error, void *context)
{
        struct kcopyd_job *job = (struct kcopyd_job *) context;
        struct dm_kcopyd_client *kc = job->kc;

        if (error) {
                if (job->rw == WRITE)
                        job->write_err |= error;
                else
                        job->read_err = 1;

                if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
                        push(&kc->complete_jobs, job);
                        wake(kc);
                        return;
                }
        }

        if (job->rw == WRITE)
                push(&kc->complete_jobs, job);
        else {
                job->rw = WRITE;
                push(&kc->io_jobs, job);
        }

        wake(kc);
}

/*
 * Issue the io for a job: a single read from the source, or a write
 * to every destination.
 */
static int run_io_job(struct kcopyd_job *job)
{
        int r;
        struct dm_io_request io_req = {
                .bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
                .mem.type = DM_IO_PAGE_LIST,
                .mem.ptr.pl = job->pages,
                .mem.offset = job->offset,
                .notify.fn = complete_io,
                .notify.context = job,
                .client = job->kc->io_client,
        };

        if (job->rw == READ)
                r = dm_io(&io_req, 1, &job->source, NULL);
        else
                r = dm_io(&io_req, job->num_dests, job->dests, NULL);

        return r;
}

static int run_pages_job(struct kcopyd_job *job)
{
        int r;

        job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
                                  PAGE_SIZE >> 9);
        r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
        if (!r) {
                /* this job is ready for io */
                push(&job->kc->io_jobs, job);
                return 0;
        }

        if (r == -ENOMEM)
                /* can't complete now */
                return 1;

        return r;
}

/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
                        int (*fn) (struct kcopyd_job *))
{
        struct kcopyd_job *job;
        int r, count = 0;

        while ((job = pop(jobs, kc))) {

                r = fn(job);

                if (r < 0) {
                        /* error this rogue job */
                        if (job->rw == WRITE)
                                job->write_err = (unsigned long) -1L;
                        else
                                job->read_err = 1;
                        push(&kc->complete_jobs, job);
                        break;
                }

                if (r > 0) {
                        /*
                         * We couldn't service this job ATM, so
                         * push this job back onto the list.
                         */
                        push_head(jobs, job);
                        break;
                }

                count++;
        }

        return count;
}
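/*
 * Worked example for the page arithmetic in run_pages_job() above,
 * assuming 4KiB pages (so PAGE_SIZE >> 9 == 8 sectors per page): a
 * job with dests[0].count == 1000 sectors and offset == 0 needs
 * dm_div_up(1000, 8) == 125 pages.  A non-zero sector offset into
 * the first page simply enlarges the span that must be covered.
 */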
/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
        struct dm_kcopyd_client *kc = container_of(work,
                                struct dm_kcopyd_client, kcopyd_work);

        /*
         * The order that these are called is *very* important.
         * Complete jobs can free some pages for pages jobs.
         * Pages jobs when successful will jump onto the io jobs
         * list.  io jobs call wake when they complete and it all
         * starts again.
         */
        process_jobs(&kc->complete_jobs, kc, run_complete_job);
        process_jobs(&kc->pages_jobs, kc, run_pages_job);
        process_jobs(&kc->io_jobs, kc, run_io_job);
}

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
        struct dm_kcopyd_client *kc = job->kc;

        atomic_inc(&kc->nr_jobs);
        if (unlikely(!job->source.count))
                push(&kc->complete_jobs, job);
        else
                push(&kc->pages_jobs, job);
        wake(kc);
}

#define SUB_JOB_SIZE 128

static void segment_complete(int read_err, unsigned long write_err,
                             void *context)
{
        sector_t progress = 0;
        sector_t count = 0;
        struct kcopyd_job *job = (struct kcopyd_job *) context;
        struct dm_kcopyd_client *kc = job->kc;

        mutex_lock(&job->lock);

        /* update the error */
        if (read_err)
                job->read_err = 1;

        if (write_err)
                job->write_err |= write_err;

        /*
         * Only dispatch more work if there hasn't been an error.
         */
        if ((!job->read_err && !job->write_err) ||
            test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
                /* get the next chunk of work */
                progress = job->progress;
                count = job->source.count - progress;
                if (count) {
                        if (count > SUB_JOB_SIZE)
                                count = SUB_JOB_SIZE;

                        job->progress += count;
                }
        }
        mutex_unlock(&job->lock);

        if (count) {
                int i;
                struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
                                                           GFP_NOIO);

                *sub_job = *job;
                sub_job->source.sector += progress;
                sub_job->source.count = count;

                for (i = 0; i < job->num_dests; i++) {
                        sub_job->dests[i].sector += progress;
                        sub_job->dests[i].count = count;
                }

                sub_job->fn = segment_complete;
                sub_job->context = job;
                dispatch_job(sub_job);

        } else if (atomic_dec_and_test(&job->sub_jobs)) {

                /*
                 * Queue the completion callback to the kcopyd thread.
                 *
                 * Some callers assume that all the completions are called
                 * from a single thread and don't race with each other.
                 *
                 * We must not call the callback directly here because this
                 * code may not be executing in the thread.
                 */
                push(&kc->complete_jobs, job);
                wake(kc);
        }
}

/*
 * Create some sub-jobs that between them will carry out the whole
 * move.
 */
#define SPLIT_COUNT 8

static void split_job(struct kcopyd_job *job)
{
        int i;

        atomic_inc(&job->kc->nr_jobs);

        atomic_set(&job->sub_jobs, SPLIT_COUNT);
        for (i = 0; i < SPLIT_COUNT; i++)
                segment_complete(0, 0u, job);
}
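/*
 * Worked example of the splitting above: with 512-byte sectors,
 * SUB_JOB_SIZE is 64KiB.  A 1MiB copy (2048 sectors) reaches
 * split_job(), which keeps SPLIT_COUNT (8) sub-jobs in flight; as
 * each finishes, segment_complete() hands it the next chunk of up
 * to 128 sectors until job->progress reaches job->source.count.
 */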
int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
                   unsigned int num_dests, struct dm_io_region *dests,
                   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
        struct kcopyd_job *job;

        /*
         * Allocate a new job.
         */
        job = mempool_alloc(kc->job_pool, GFP_NOIO);

        /*
         * Set up for the read.
         */
        job->kc = kc;
        job->flags = flags;
        job->read_err = 0;
        job->write_err = 0;
        job->rw = READ;

        job->source = *from;

        job->num_dests = num_dests;
        memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

        job->offset = 0;
        job->nr_pages = 0;
        job->pages = NULL;

        job->fn = fn;
        job->context = context;

        if (job->source.count < SUB_JOB_SIZE)
                dispatch_job(job);
        else {
                mutex_init(&job->lock);
                job->progress = 0;
                split_job(job);
        }

        return 0;
}
EXPORT_SYMBOL(dm_kcopyd_copy);

/*
 * Cancels a kcopyd job, e.g. someone might be deactivating a
 * mirror.
 */

/*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
int dm_kcopyd_client_create(unsigned int nr_pages,
                            struct dm_kcopyd_client **result)
{
        int r = -ENOMEM;
        struct dm_kcopyd_client *kc;

        kc = kmalloc(sizeof(*kc), GFP_KERNEL);
        if (!kc)
                return -ENOMEM;

        spin_lock_init(&kc->lock);
        spin_lock_init(&kc->job_lock);
        INIT_LIST_HEAD(&kc->complete_jobs);
        INIT_LIST_HEAD(&kc->io_jobs);
        INIT_LIST_HEAD(&kc->pages_jobs);

        kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
        if (!kc->job_pool)
                goto bad_slab;

        INIT_WORK(&kc->kcopyd_work, do_work);
        kc->kcopyd_wq = create_singlethread_workqueue("kcopyd");
        if (!kc->kcopyd_wq)
                goto bad_workqueue;

        kc->pages = NULL;
        kc->nr_pages = kc->nr_free_pages = 0;
        r = client_alloc_pages(kc, nr_pages);
        if (r)
                goto bad_client_pages;

        kc->io_client = dm_io_client_create(nr_pages);
        if (IS_ERR(kc->io_client)) {
                r = PTR_ERR(kc->io_client);
                goto bad_io_client;
        }

        init_waitqueue_head(&kc->destroyq);
        atomic_set(&kc->nr_jobs, 0);

        *result = kc;
        return 0;

bad_io_client:
        client_free_pages(kc);
bad_client_pages:
        destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
        mempool_destroy(kc->job_pool);
bad_slab:
        kfree(kc);

        return r;
}
EXPORT_SYMBOL(dm_kcopyd_client_create);

void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
        /* Wait for completion of all jobs submitted by this client. */
        wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

        BUG_ON(!list_empty(&kc->complete_jobs));
        BUG_ON(!list_empty(&kc->io_jobs));
        BUG_ON(!list_empty(&kc->pages_jobs));
        destroy_workqueue(kc->kcopyd_wq);
        dm_io_client_destroy(kc->io_client);
        client_free_pages(kc);
        mempool_destroy(kc->job_pool);
        kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);
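/*
 * Illustrative client usage (a sketch only, not part of this file;
 * the callback name, device pointers and region sizes are invented):
 *
 *      static void copy_done(int read_err, unsigned long write_err,
 *                            void *context)
 *      {
 *              ... both error arguments are zero on success ...
 *      }
 *
 *      struct dm_kcopyd_client *kc;
 *      struct dm_io_region from = { .bdev = src, .sector = 0, .count = 1024 };
 *      struct dm_io_region to = { .bdev = dst, .sector = 0, .count = 1024 };
 *
 *      if (!dm_kcopyd_client_create(32, &kc)) {
 *              dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, NULL);
 *              ...
 *              dm_kcopyd_client_destroy(kc);
 *      }
 *
 * dm_kcopyd_client_destroy() waits for any outstanding jobs before
 * tearing the client down, so it is safe to call once all copies
 * have been submitted.
 */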