Lines matching refs: kc. Cross-reference hits for the identifier kc (a struct dm_kcopyd_client pointer) in the kernel's dm-kcopyd code. Each hit gives the file's own line number, the matched source line, and the enclosing function, with the kind of reference (argument, local, member) noted at the end.
307 static void wake(struct dm_kcopyd_client *kc) in wake() argument
309 queue_work(kc->kcopyd_wq, &kc->kcopyd_work); in wake()
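The two hits above are effectively the whole helper: wake() just queues the client's work item so the worker will drain the job lists. Reassembled from the matched lines (only the braces are missing from the listing):

static void wake(struct dm_kcopyd_client *kc)
{
	/* kick the per-client worker; do_work() drains the job lists */
	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}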
354 static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl) in kcopyd_put_pages() argument
361 if (kc->nr_free_pages >= kc->nr_reserved_pages) in kcopyd_put_pages()
364 pl->next = kc->pages; in kcopyd_put_pages()
365 kc->pages = pl; in kcopyd_put_pages()
366 kc->nr_free_pages++; in kcopyd_put_pages()
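The hits at 361-366 show the cache-or-free decision: once the client already holds its full reserve (nr_free_pages >= nr_reserved_pages) a returned page is freed outright; below that watermark it goes back onto the kc->pages free list. A sketch of the surrounding loop, with the freeing helper named free_pl() as an assumption; only the matched lines are verbatim:

static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *next;

	do {
		next = pl->next;

		if (kc->nr_free_pages >= kc->nr_reserved_pages)
			free_pl(pl);		/* reserve already full: free it */
		else {
			/* top the reserve back up */
			pl->next = kc->pages;
			kc->pages = pl;
			kc->nr_free_pages++;
		}

		pl = next;
	} while (pl);
}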
373 static int kcopyd_get_pages(struct dm_kcopyd_client *kc, in kcopyd_get_pages() argument
385 pl = kc->pages; in kcopyd_get_pages()
388 kc->pages = pl->next; in kcopyd_get_pages()
389 kc->nr_free_pages--; in kcopyd_get_pages()
399 kcopyd_put_pages(kc, *pages); in kcopyd_get_pages()
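kcopyd_get_pages() is the mirror image: it prefers fresh allocations and dips into the kc->pages reserve only when those fail, and if the reserve also runs dry it returns everything grabbed so far via the kcopyd_put_pages() call matched at line 399. A sketch assuming an alloc_pl() allocation helper; the signature at 373 is truncated in the listing, and the extra job->flags argument visible at the line-706 call site is omitted here:

static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned nr, struct page_list **pages)
{
	struct page_list *pl;

	*pages = NULL;

	do {
		/* opportunistic allocation; the gfp flags are an assumption */
		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY);
		if (!pl) {
			/* fall back to the pre-reserved pages */
			pl = kc->pages;
			if (!pl)
				goto out_of_memory;
			kc->pages = pl->next;
			kc->nr_free_pages--;
		}
		pl->next = *pages;
		*pages = pl;
	} while (--nr);

	return 0;

out_of_memory:
	if (*pages)
		kcopyd_put_pages(kc, *pages);	/* undo the partial grab */
	return -ENOMEM;
}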
420 static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages) in client_reserve_pages() argument
436 kc->nr_reserved_pages += nr_pages; in client_reserve_pages()
437 kcopyd_put_pages(kc, pl); in client_reserve_pages()
442 static void client_free_pages(struct dm_kcopyd_client *kc) in client_free_pages() argument
444 BUG_ON(kc->nr_free_pages != kc->nr_reserved_pages); in client_free_pages()
445 drop_pages(kc->pages); in client_free_pages()
446 kc->pages = NULL; in client_free_pages()
447 kc->nr_free_pages = kc->nr_reserved_pages = 0; in client_free_pages()
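These two bound the reserve's lifetime: client_reserve_pages() allocates nr_pages, bumps kc->nr_reserved_pages (line 436) and parks the pages on the free list through kcopyd_put_pages() (line 437); client_free_pages() insists every reserved page has come home (the BUG_ON at 444) before dropping the list. A sketch of the reserve side, again assuming alloc_pl() and drop_pages() helpers:

static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
{
	unsigned i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr_pages; i++) {
		next = alloc_pl(GFP_KERNEL);
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kc->nr_reserved_pages += nr_pages;
	kcopyd_put_pages(kc, pl);	/* park them on kc->pages */

	return 0;
}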
456 struct dm_kcopyd_client *kc; member
529 struct dm_kcopyd_client *kc) in pop_io_job() argument
554 struct dm_kcopyd_client *kc) in pop() argument
559 spin_lock_irqsave(&kc->job_lock, flags); in pop()
562 if (jobs == &kc->io_jobs) in pop()
563 job = pop_io_job(jobs, kc); in pop()
569 spin_unlock_irqrestore(&kc->job_lock, flags); in pop()
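pop() is the single consumer-side entry point for all three job lists: everything happens under kc->job_lock, and the io_jobs list gets special treatment via pop_io_job() (line 529), which in throttled configurations may decline to hand out a job. Assembled around the matched lines:

static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kc->job_lock, flags);

	if (!list_empty(jobs)) {
		if (jobs == &kc->io_jobs)
			job = pop_io_job(jobs, kc);
		else {
			job = list_entry(jobs->next, struct kcopyd_job, list);
			list_del(&job->list);
		}
	}
	spin_unlock_irqrestore(&kc->job_lock, flags);

	return job;
}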
577 struct dm_kcopyd_client *kc = job->kc; in push() local
579 spin_lock_irqsave(&kc->job_lock, flags); in push()
581 spin_unlock_irqrestore(&kc->job_lock, flags); in push()
588 struct dm_kcopyd_client *kc = job->kc; in push_head() local
590 spin_lock_irqsave(&kc->job_lock, flags); in push_head()
592 spin_unlock_irqrestore(&kc->job_lock, flags); in push_head()
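push() and push_head() are the matching producers, taking the same lock. The difference is ordering: push() appends so jobs are serviced FIFO, while push_head() prepends so a job that had to be deferred is retried first. A sketch of push(); push_head() is identical except it uses list_add():

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}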
610 struct dm_kcopyd_client *kc = job->kc; in run_complete_job() local
613 kcopyd_put_pages(kc, job->pages); in run_complete_job()
620 mempool_free(job, &kc->job_pool); in run_complete_job()
624 if (atomic_dec_and_test(&kc->nr_jobs)) in run_complete_job()
625 wake_up(&kc->destroyq); in run_complete_job()
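run_complete_job() is where a finished job hands everything back: pages to kcopyd_put_pages() (line 613), the job itself to the client's mempool (line 620), and the final decrement of kc->nr_jobs wakes whoever is blocked in dm_kcopyd_client_destroy() (lines 624-625). A sketch; the zero_page_list and master_job checks are mainline details and are assumptions here:

static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
	dm_kcopyd_notify_fn fn = job->fn;
	struct dm_kcopyd_client *kc = job->kc;

	if (job->pages && job->pages != &zero_page_list)
		kcopyd_put_pages(kc, job->pages);

	/* only the master job owns the mempool allocation (assumption) */
	if (job->master_job == job)
		mempool_free(job, &kc->job_pool);

	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	return 0;
}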
635 struct dm_kcopyd_client *kc = job->kc; in complete_io() local
637 io_job_finish(kc->throttle); in complete_io()
646 push(&kc->complete_jobs, job); in complete_io()
647 wake(kc); in complete_io()
653 push(&kc->complete_jobs, job); in complete_io()
657 push(&kc->io_jobs, job); in complete_io()
660 wake(kc); in complete_io()
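complete_io() is the dm-io completion callback, and the three push() hits map out its state machine: on a non-ignored error the job short-circuits to complete_jobs (646-647), a finished write is also complete (653), and a finished read is converted into the write phase and requeued on io_jobs (657). It credits the throttle first (637) and always wakes the worker. A sketch; the direction test and flag name follow mainline and may differ in this tree:

static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = context;
	struct dm_kcopyd_client *kc = job->kc;

	io_job_finish(kc->throttle);

	if (error) {
		if (op_is_write(job->rw))
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!(job->flags & BIT(DM_KCOPYD_IGNORE_ERROR))) {
			push(&kc->complete_jobs, job);
			wake(kc);
			return;
		}
	}

	if (op_is_write(job->rw))
		push(&kc->complete_jobs, job);	/* copy finished */
	else {
		job->rw = WRITE;		/* read done: start the writes */
		push(&kc->io_jobs, job);
	}

	wake(kc);
}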
678 .client = job->kc->io_client, in run_io_job()
691 io_job_start(job->kc->throttle); in run_io_job()
706 r = kcopyd_get_pages(job->kc, nr_pages, &job->pages, job->flags); in run_pages_job()
709 push(&job->kc->io_jobs, job); in run_pages_job()
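run_io_job() builds the dm_io request against kc->io_client (678) and charges the throttle before issuing (691); run_pages_job() is the hand-off that feeds it. It sizes the buffer from the first destination, asks kcopyd_get_pages() for it (the line-706 hit, including this tree's extra job->flags argument), and on success pushes the job to kc->io_jobs for the next do_work() pass, while -ENOMEM leaves it queued for retry. A sketch:

static int run_pages_job(struct kcopyd_job *job)
{
	int r;
	unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);

	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages, job->flags);
	if (!r) {
		/* pages in hand: the job is ready for I/O */
		push(&job->kc->io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		return 1;	/* positive: requeue and retry later */

	return r;
}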
724 static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, in process_jobs() argument
730 while ((job = pop(jobs, kc))) { in process_jobs()
740 push(&kc->complete_jobs, job); in process_jobs()
741 wake(kc); in process_jobs()
765 struct dm_kcopyd_client *kc = container_of(work, in do_work() local
777 spin_lock_irqsave(&kc->job_lock, flags); in do_work()
778 list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs); in do_work()
779 spin_unlock_irqrestore(&kc->job_lock, flags); in do_work()
782 process_jobs(&kc->complete_jobs, kc, run_complete_job); in do_work()
783 process_jobs(&kc->pages_jobs, kc, run_pages_job); in do_work()
784 process_jobs(&kc->io_jobs, kc, run_io_job); in do_work()
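do_work() is the worker body, and the ordering of the three process_jobs() calls (782-784) is load-bearing: retiring complete jobs first releases pages and mempool slots, pages jobs run next so they can use what was just freed, and successful pages jobs have already jumped onto io_jobs, which is drained last. Inside process_jobs(), a job whose handler fails is pushed to complete_jobs and the worker rewoken (740-741). The splice at 778 moves jobs parked on kc->callback_jobs from interrupt-safe contexts onto complete_jobs under the lock. Assembled from the matched lines, with the block plugging mainline uses around the I/O:

static void do_work(struct work_struct *work)
{
	struct dm_kcopyd_client *kc = container_of(work,
					struct dm_kcopyd_client, kcopyd_work);
	struct blk_plug plug;
	unsigned long flags;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);

	blk_start_plug(&plug);
	process_jobs(&kc->complete_jobs, kc, run_complete_job);
	process_jobs(&kc->pages_jobs, kc, run_pages_job);
	process_jobs(&kc->io_jobs, kc, run_io_job);
	blk_finish_plug(&plug);
}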
795 struct dm_kcopyd_client *kc = job->kc; in dispatch_job() local
796 atomic_inc(&kc->nr_jobs); in dispatch_job()
798 push(&kc->callback_jobs, job); in dispatch_job()
800 push(&kc->io_jobs, job); in dispatch_job()
802 push(&kc->pages_jobs, job); in dispatch_job()
803 wake(kc); in dispatch_job()
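dispatch_job() routes a new job by what it still needs: a zero-length job goes straight to callback_jobs (798), a zeroing job that uses the shared zero pages skips page allocation and goes to io_jobs (800), and everything else queues on pages_jobs (802). The conditions shown are mainline's and are assumptions here:

static void dispatch_job(struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;

	atomic_inc(&kc->nr_jobs);
	if (unlikely(!job->source.count))
		push(&kc->callback_jobs, job);	/* nothing to copy */
	else if (job->pages == &zero_page_list)
		push(&kc->io_jobs, job);	/* zeroing: no pages needed */
	else
		push(&kc->pages_jobs, job);
	wake(kc);
}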
814 struct dm_kcopyd_client *kc = job->kc; in segment_complete() local
834 if (count > kc->sub_job_size) in segment_complete()
835 count = kc->sub_job_size; in segment_complete()
870 push(&kc->complete_jobs, job); in segment_complete()
871 wake(kc); in segment_complete()
882 atomic_inc(&master_job->kc->nr_jobs); in split_job()
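segment_complete() and split_job() form the large-copy path: the master job is carved into sub-jobs of at most kc->sub_job_size sectors (the clamp at 834-835), each sub-job completion grabs the next segment, and when the last one finishes the master is pushed to complete_jobs (870-871). Line 882 counts the master itself in nr_jobs before the segments start. A sketch of split_job(), assuming mainline's layout where the sub-jobs sit in an array right after the master and SPLIT_COUNT of them are seeded:

static void split_job(struct kcopyd_job *master_job)
{
	int i;

	atomic_inc(&master_job->kc->nr_jobs);	/* count the master itself */

	atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++) {
		master_job[i + 1].master_job = master_job;
		segment_complete(0, 0u, (void *)&master_job[i + 1]);
	}
}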
891 void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, in dm_kcopyd_copy() argument
902 job = mempool_alloc(&kc->job_pool, GFP_NOIO); in dm_kcopyd_copy()
908 job->kc = kc; in dm_kcopyd_copy()
962 if (job->source.count <= kc->sub_job_size) in dm_kcopyd_copy()
971 void dm_kcopyd_zero(struct dm_kcopyd_client *kc, in dm_kcopyd_zero() argument
975 dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context); in dm_kcopyd_zero()
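dm_kcopyd_copy() allocates a job from kc->job_pool (902), fills it in, and either dispatches it directly when the source fits in one sub-job (962) or splits it; dm_kcopyd_zero() (971-975) is the same call with a NULL source, which becomes writes from the zero pages. A hypothetical caller's sketch; the devices, region sizes, and callback are illustrative, not from the listing:

/* assumes <linux/dm-kcopyd.h>, <linux/dm-io.h>, <linux/completion.h> */
static void copy_done(int read_err, unsigned long write_err, void *context)
{
	if (read_err || write_err)
		pr_err("kcopyd: copy failed\n");
	complete(context);	/* struct completion supplied by the caller */
}

static void copy_example(struct dm_kcopyd_client *kc,
			 struct block_device *src, struct block_device *dst,
			 struct completion *done)
{
	struct dm_io_region from = {
		.bdev = src,
		.sector = 0,
		.count = 128,	/* sectors */
	};
	struct dm_io_region to = from;

	to.bdev = dst;
	dm_kcopyd_copy(kc, &from, 1, &to, 0 /* flags */, copy_done, done);
}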
979 void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc, in dm_kcopyd_prepare_callback() argument
984 job = mempool_alloc(&kc->job_pool, GFP_NOIO); in dm_kcopyd_prepare_callback()
987 job->kc = kc; in dm_kcopyd_prepare_callback()
992 atomic_inc(&kc->nr_jobs); in dm_kcopyd_prepare_callback()
1001 struct dm_kcopyd_client *kc = job->kc; in dm_kcopyd_do_callback() local
1006 push(&kc->callback_jobs, job); in dm_kcopyd_do_callback()
1007 wake(kc); in dm_kcopyd_do_callback()
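The prepare/do pair gives callers a deferred completion: dm_kcopyd_prepare_callback() (979-992) takes a job from the mempool and counts it in nr_jobs up front, and dm_kcopyd_do_callback() (1001-1007) later parks it on callback_jobs and wakes the worker, which delivers it through run_complete_job(). A hypothetical usage fragment, reusing the copy_done callback from the sketch above:

/* set up early, in a context that may sleep */
void *cb = dm_kcopyd_prepare_callback(kc, copy_done, done);

/* ... later, from a context where allocation is not allowed ... */
dm_kcopyd_do_callback(cb, 0 /* read_err */, 0 /* write_err */);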
1030 struct dm_kcopyd_client *kc; in dm_kcopyd_client_create() local
1032 kc = kzalloc(sizeof(*kc), GFP_KERNEL); in dm_kcopyd_client_create()
1033 if (!kc) in dm_kcopyd_client_create()
1036 spin_lock_init(&kc->job_lock); in dm_kcopyd_client_create()
1037 INIT_LIST_HEAD(&kc->callback_jobs); in dm_kcopyd_client_create()
1038 INIT_LIST_HEAD(&kc->complete_jobs); in dm_kcopyd_client_create()
1039 INIT_LIST_HEAD(&kc->io_jobs); in dm_kcopyd_client_create()
1040 INIT_LIST_HEAD(&kc->pages_jobs); in dm_kcopyd_client_create()
1041 kc->throttle = throttle; in dm_kcopyd_client_create()
1043 r = mempool_init_slab_pool(&kc->job_pool, MIN_JOBS, _job_cache); in dm_kcopyd_client_create()
1047 INIT_WORK(&kc->kcopyd_work, do_work); in dm_kcopyd_client_create()
1048 kc->kcopyd_wq = alloc_workqueue("kcopyd", WQ_MEM_RECLAIM, 0); in dm_kcopyd_client_create()
1049 if (!kc->kcopyd_wq) { in dm_kcopyd_client_create()
1054 kc->sub_job_size = dm_get_kcopyd_subjob_size(); in dm_kcopyd_client_create()
1055 reserve_pages = DIV_ROUND_UP(kc->sub_job_size << SECTOR_SHIFT, PAGE_SIZE); in dm_kcopyd_client_create()
1057 kc->pages = NULL; in dm_kcopyd_client_create()
1058 kc->nr_reserved_pages = kc->nr_free_pages = 0; in dm_kcopyd_client_create()
1059 r = client_reserve_pages(kc, reserve_pages); in dm_kcopyd_client_create()
1063 kc->io_client = dm_io_client_create(); in dm_kcopyd_client_create()
1064 if (IS_ERR(kc->io_client)) { in dm_kcopyd_client_create()
1065 r = PTR_ERR(kc->io_client); in dm_kcopyd_client_create()
1069 init_waitqueue_head(&kc->destroyq); in dm_kcopyd_client_create()
1070 atomic_set(&kc->nr_jobs, 0); in dm_kcopyd_client_create()
1072 return kc; in dm_kcopyd_client_create()
1075 client_free_pages(kc); in dm_kcopyd_client_create()
1077 destroy_workqueue(kc->kcopyd_wq); in dm_kcopyd_client_create()
1079 mempool_exit(&kc->job_pool); in dm_kcopyd_client_create()
1081 kfree(kc); in dm_kcopyd_client_create()
1087 void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc) in dm_kcopyd_client_destroy() argument
1090 wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs)); in dm_kcopyd_client_destroy()
1092 BUG_ON(!list_empty(&kc->callback_jobs)); in dm_kcopyd_client_destroy()
1093 BUG_ON(!list_empty(&kc->complete_jobs)); in dm_kcopyd_client_destroy()
1094 BUG_ON(!list_empty(&kc->io_jobs)); in dm_kcopyd_client_destroy()
1095 BUG_ON(!list_empty(&kc->pages_jobs)); in dm_kcopyd_client_destroy()
1096 destroy_workqueue(kc->kcopyd_wq); in dm_kcopyd_client_destroy()
1097 dm_io_client_destroy(kc->io_client); in dm_kcopyd_client_destroy()
1098 client_free_pages(kc); in dm_kcopyd_client_destroy()
1099 mempool_exit(&kc->job_pool); in dm_kcopyd_client_destroy()
1100 kfree(kc); in dm_kcopyd_client_destroy()
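Taken together, create and destroy bound the client's lifetime: dm_kcopyd_client_create() builds the lists, mempool, workqueue, page reserve and dm-io client, unwinding in reverse order on failure (1075-1081); dm_kcopyd_client_destroy() first sleeps on destroyq until nr_jobs drops to zero (1090), at which point all four lists must be empty (the BUG_ONs at 1092-1095) before teardown. A minimal lifecycle sketch for a caller, passing a NULL throttle:

#include <linux/dm-kcopyd.h>

static struct dm_kcopyd_client *client;

static int my_ctr(void)
{
	client = dm_kcopyd_client_create(NULL /* no throttle */);
	if (IS_ERR(client))
		return PTR_ERR(client);
	return 0;
}

static void my_dtr(void)
{
	/* blocks until every outstanding job has completed */
	dm_kcopyd_client_destroy(client);
}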