dm-kcopyd job handling: excerpts from drivers/md/dm-kcopyd.c matching "job", grouped by function.
MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
From struct kcopyd_job, the unit of work tracked throughout this file:

	/* Error state of the job. */
	/* Set this to ensure you are notified when the job has
	   completed.  'context' is for callback to use. */
	/* These fields are only used if the job has been split
	   into more manageable parts. */
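Taken together, these comments cover the three kinds of state a job carries: error accounting, a completion callback, and split-tracking fields. A simplified sketch of that state, with stubbed types and a hypothetical name (the authoritative struct kcopyd_job definition lives in drivers/md/dm-kcopyd.c and has more fields, such as the list head, flags and page list):

/* Simplified, hypothetical rendering of the job state described above. */
struct kcopyd_job_sketch {
	/* Error state of the job. */
	int read_err;                 /* set to 1 on any read failure */
	unsigned long write_err;      /* one bit per failed destination */

	/* Completion notification; 'context' is for the callback to use. */
	void (*fn)(int read_err, unsigned long write_err, void *context);
	void *context;

	/* Only used if the job has been split into sub-jobs. */
	struct kcopyd_job_sketch *master_job; /* points to itself for the master */
	unsigned long long progress;          /* sectors handed out so far */
	unsigned long long write_offset;      /* next in-order write position */
};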
/*
 * Functions to push and pop a job onto the head of a given job list.
 */
static struct kcopyd_job *pop_io_job(struct list_head *jobs,
				     struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job;

	/*
	 * Pop any read, any write without the sequential-write constraint,
	 * or a sequential write that is at the right position.
	 */
	list_for_each_entry(job, jobs, list) {
		if (job->rw == READ || !test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
			list_del(&job->list);
			return job;
		}
		if (job->write_offset == job->master_job->write_offset) {
			job->master_job->write_offset += job->source.count;
			list_del(&job->list);
			return job;
		}
	}
	return NULL;
}
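The second branch is what keeps sequential-write destinations (for example zoned devices) in order: a split write sub-job is eligible only when its offset equals the master job's current write_offset, and claiming it advances that cursor. A minimal standalone illustration of the same gating rule, using invented names (struct sub, try_dispatch):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical illustration of the write_offset gate: a sub-job may be
 * dispatched only when it starts exactly where the previous in-order
 * write ended. */
struct sub { unsigned long offset, count; };

static bool try_dispatch(struct sub *s, unsigned long *master_offset)
{
	if (s->offset != *master_offset)
		return false;          /* out of order: leave it queued */
	*master_offset += s->count;    /* claim it and advance the cursor */
	return true;
}

int main(void)
{
	struct sub subs[] = { { 16, 8 }, { 0, 8 }, { 8, 8 } };
	unsigned long master_offset = 0;
	bool done[3] = { false, false, false };

	/* Repeatedly scan, mimicking pop_io_job() walking the list. */
	for (int pass = 0; pass < 3; pass++)
		for (int i = 0; i < 3; i++)
			if (!done[i] && try_dispatch(&subs[i], &master_offset)) {
				printf("dispatched [%lu, +%lu)\n",
				       subs[i].offset, subs[i].count);
				done[i] = true;
			}
	return 0;
}

Even though the sub-jobs sit on the list out of order, they are dispatched as [0,+8), [8,+8), [16,+8).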
static struct kcopyd_job *pop(struct list_head *jobs, struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;

	spin_lock_irq(&kc->job_lock);
	if (!list_empty(jobs)) {
		/* Only the io_jobs list is subject to ordering constraints. */
		if (jobs == &kc->io_jobs)
			job = pop_io_job(jobs, kc);
		else {
			job = list_entry(jobs->next, struct kcopyd_job, list);
			list_del(&job->list);
		}
	}
	spin_unlock_irq(&kc->job_lock);
	return job;
}
static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irq(&kc->job_lock);
	list_add_tail(&job->list, jobs);
	spin_unlock_irq(&kc->job_lock);
}
static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irq(&kc->job_lock);
	/* Requeue at the head so a deferred job is retried first. */
	list_add(&job->list, jobs);
	spin_unlock_irq(&kc->job_lock);
}
/*
 * These three functions process 1 item from the corresponding job list.
 * They return: < 0 error, 0 success, > 0 can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
	dm_kcopyd_notify_fn fn = job->fn;
	struct dm_kcopyd_client *kc = job->kc;

	if (job->pages && job->pages != &zero_page_list)
		kcopyd_put_pages(kc, job->pages);
	/*
	 * If this is the master job, the sub jobs have already
	 * completed so we can free everything.
	 */
	if (job->master_job == job) {
		mutex_destroy(&job->lock);
		mempool_free(job, &kc->job_pool);
	}
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	cond_resched();
	return 0;
}
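Worth noting: everything the completion path needs is copied into locals before the master job is returned to the mempool, so the notify callback can safely run after the job memory is gone. A hypothetical userspace sketch of the same save-then-free discipline:

#include <stdlib.h>
#include <stdio.h>

/* Hypothetical sketch: copy callback arguments out of an object before
 * freeing it, then invoke the callback, mirroring run_complete_job(). */
typedef void (*notify_fn)(int read_err, unsigned long write_err, void *ctx);

struct done_job {
	int read_err;
	unsigned long write_err;
	notify_fn fn;
	void *context;
};

static void complete_job(struct done_job *job)
{
	/* Save everything first: 'job' is invalid after free(). */
	notify_fn fn = job->fn;
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;

	free(job);
	fn(read_err, write_err, context);
}

static void report(int r, unsigned long w, void *ctx)
{
	printf("%s: read_err=%d write_err=%lu\n", (char *)ctx, r, w);
}

int main(void)
{
	struct done_job *job = malloc(sizeof(*job));
	*job = (struct done_job){ .read_err = 0, .write_err = 0,
				  .fn = report, .context = "copy-1" };
	complete_job(job);
	return 0;
}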
static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

	io_job_finish(kc->throttle);

	if (error) {
		if (op_is_write(job->rw))
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&kc->complete_jobs, job);
			wake(kc);
			return;
		}
	}

	if (op_is_write(job->rw))
		push(&kc->complete_jobs, job);
	else {
		/* The read half is done: resubmit the same job as a write. */
		job->rw = WRITE;
		push(&kc->io_jobs, job);
	}
	wake(kc);
}
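This is where a copy job changes phase: when the read half succeeds, the very same job is relabelled WRITE and requeued on io_jobs, so one job structure drives both halves of the copy. A minimal sketch of that flip (simplified: it ignores the DM_KCOPYD_IGNORE_ERROR carve-out above):

#include <stdio.h>

enum phase { READ_PHASE, WRITE_PHASE, DONE };

/* Hypothetical sketch of complete_io()'s phase flip: a successful READ
 * requeues the same job as a WRITE; a WRITE completion or any error
 * sends it to the completion list. */
static enum phase on_io_done(enum phase p, int error)
{
	if (error || p == WRITE_PHASE)
		return DONE;
	return WRITE_PHASE;
}

int main(void)
{
	enum phase p = READ_PHASE;
	p = on_io_done(p, 0);   /* read finished: now WRITE_PHASE */
	p = on_io_done(p, 0);   /* write finished: now DONE */
	printf("final phase: %s\n", p == DONE ? "DONE" : "not done");
	return 0;
}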
/*
 * Request io on as many buffer heads as we can currently get for a
 * particular job.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
		.bi_op = job->rw,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = 0,
		.notify.fn = complete_io,
		.notify.context = job,
		.client = job->kc->io_client,
	};

	/* If we must write sequentially and a sibling already failed, stop. */
	if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
	    job->master_job->write_err) {
		job->write_err = job->master_job->write_err;
		return -EIO;
	}

	io_job_start(job->kc->throttle);

	/* One region for the read; the write fans out to every destination. */
	if (job->rw == READ)
		r = dm_io(&io_req, 1, &job->source, NULL);
	else
		r = dm_io(&io_req, job->num_dests, job->dests, NULL);
	return r;
}
static int run_pages_job(struct kcopyd_job *job)
{
	int r;
	unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);

	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages, job->flags);
	if (!r) {
		/* this job is ready for io */
		push(&job->kc->io_jobs, job);
		return 0;
	}
	if (r == -ENOMEM)
		/* can't complete now */
		return 1;
	return r;
}
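dests[0].count is in 512-byte sectors, so the reservation divides by PAGE_SIZE >> 9 (sectors per page) and rounds up; dm_div_up() is the kernel's round-up division. A quick standalone check of the arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define SECTOR_SIZE     512UL
#define PAGE_SIZE_BYTES 4096UL   /* assumption: 4 KiB pages */

/* Round-up division, like the kernel's dm_div_up()/DIV_ROUND_UP. */
static unsigned long div_up(unsigned long n, unsigned long d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	unsigned long sectors_per_page = PAGE_SIZE_BYTES / SECTOR_SIZE; /* 8 */

	/* A 1029-sector destination needs 129 pages: 1024 sectors fill
	 * 128 pages and the 5-sector tail still costs a whole page. */
	printf("%lu pages\n", div_up(1029, sectors_per_page));
	return 0;
}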
/*
 * kcopyd does this every time it's woken up.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
			int (*fn)(struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs, kc))) {
		r = fn(job);
		if (r < 0) {
			/* error this rogue job */
			if (op_is_write(job->rw))
				job->write_err = (unsigned long) -1L;
			else
				job->read_err = 1;
			push(&kc->complete_jobs, job);
			wake(kc);
			break;
		}
		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push_head(jobs, job);
			break;
		}
		count++;
	}
	return count;
}
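The worker functions above all follow one contract: return < 0 to fail the job, 0 on success, and > 0 to say "can't run yet", in which case the job goes back on the head of its list and processing stops, preserving order. A hypothetical miniature of that loop over a plain array:

#include <stdio.h>

/* Hypothetical sketch of process_jobs()'s contract. */
enum { JOB_ERR = -1, JOB_OK = 0, JOB_BUSY = 1 };

static int process(const int *queue, int n, int (*fn)(int job))
{
	int count = 0;
	for (int i = 0; i < n; i++) {
		int r = fn(queue[i]);
		if (r < 0) {
			printf("job %d failed\n", queue[i]);
			break;
		}
		if (r > 0) {
			/* In kcopyd the job would be pushed back at the
			 * head of its list; here we just stop. */
			printf("job %d busy, stopping\n", queue[i]);
			break;
		}
		count++;
	}
	return count;
}

static int worker(int job)
{
	return job == 3 ? JOB_BUSY : JOB_OK;  /* pretend job 3 lacks pages */
}

int main(void)
{
	int queue[] = { 1, 2, 3, 4 };
	printf("processed %d jobs\n", process(queue, 4, worker));
	return 0;
}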
/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;

	atomic_inc(&kc->nr_jobs);
	if (unlikely(!job->source.count))
		push(&kc->callback_jobs, job);
	else if (job->pages == &zero_page_list)
		push(&kc->io_jobs, job);
	else
		push(&kc->pages_jobs, job);
	wake(kc);
}
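The routing is a three-way decision: a zero-length source means there is nothing to copy and the job completes immediately; a zero-fill job already has its shared zero pages and can go straight to I/O; everything else must reserve pages first. The same decision as a hypothetical standalone function:

#include <stdio.h>

/* Hypothetical mirror of dispatch_job()'s routing decision. */
static const char *route(unsigned long source_count, int uses_zero_pages)
{
	if (source_count == 0)
		return "callback_jobs"; /* nothing to copy: complete now */
	if (uses_zero_pages)
		return "io_jobs";       /* zero fill: no pages to allocate */
	return "pages_jobs";            /* normal copy: reserve pages first */
}

int main(void)
{
	printf("%s\n", route(0, 0));    /* callback_jobs */
	printf("%s\n", route(128, 1));  /* io_jobs */
	printf("%s\n", route(128, 0));  /* pages_jobs */
	return 0;
}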
static void segment_complete(int read_err, unsigned long write_err,
			     void *context)
{
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
	struct kcopyd_job *job = sub_job->master_job;
	struct dm_kcopyd_client *kc = job->kc;

	mutex_lock(&job->lock);
	/* update the error */
	if (read_err)
		job->read_err = 1;
	if (write_err)
		job->write_err |= write_err;

	/* Only dispatch more work if there hasn't been an error. */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count > kc->sub_job_size)
			count = kc->sub_job_size;
		job->progress += count;
	}
	mutex_unlock(&job->lock);

	if (count) {
		int i;

		*sub_job = *job;
		sub_job->write_offset = progress;
		sub_job->source.sector += progress;
		sub_job->source.count = count;
		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}
		sub_job->fn = segment_complete;
		sub_job->context = sub_job;
		dispatch_job(sub_job);
	} else if (atomic_dec_and_test(&job->sub_jobs)) {
		/* Queue the completion of the whole job. */
		push(&kc->complete_jobs, job);
		wake(kc);
	}
}
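Each callback invocation claims the next chunk of at most kc->sub_job_size sectors and re-aims the finished sub-job at it, so a large copy is consumed as equal-sized segments plus one short tail. A standalone illustration of the carving, assuming the default 512 KiB (1024-sector) sub-job size:

#include <stdio.h>

int main(void)
{
	/* Assumption: the default sub-job size, 512 KiB = 1024 sectors. */
	unsigned long sub_job_size = 1024;
	unsigned long total = 2500;      /* sectors in the master job */
	unsigned long progress = 0;

	while (progress < total) {
		unsigned long count = total - progress;
		if (count > sub_job_size)
			count = sub_job_size;
		printf("sub-job: sectors [%lu, %lu)\n",
		       progress, progress + count);
		progress += count;
	}
	return 0;
}

This prints [0, 1024), [1024, 2048), [2048, 2500): two full segments and a tail.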
void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		    unsigned int num_dests, struct dm_io_region *dests,
		    unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;
	int i;

	/*
	 * Allocate an array of jobs consisting of one master job
	 * followed by SPLIT_COUNT sub jobs.
	 */
	job = mempool_alloc(&kc->job_pool, GFP_NOIO);
	mutex_init(&job->lock);

	/* set up for the read */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;
	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	/*
	 * If one of the destinations is a host-managed zoned block device,
	 * we need to write sequentially.
	 */
	if (!test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
		for (i = 0; i < job->num_dests; i++) {
			if (bdev_zoned_model(dests[i].bdev) == BLK_ZONED_HM) {
				set_bit(DM_KCOPYD_WRITE_SEQ, &job->flags);
				break;
			}
		}
	}

	/* If we need to write sequentially, errors cannot be ignored. */
	if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags))
		clear_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags);

	if (from) {
		job->source = *from;
		job->pages = NULL;
		job->rw = READ;
	} else {
		/* No source: this is a zero-fill of the destinations. */
		memset(&job->source, 0, sizeof job->source);
		job->source.count = job->dests[0].count;
		job->pages = &zero_page_list;

		/* Use WRITE ZEROES only if every destination supports it. */
		job->rw = REQ_OP_WRITE_ZEROES;
		for (i = 0; i < job->num_dests; i++)
			if (!bdev_write_zeroes_sectors(job->dests[i].bdev)) {
				job->rw = WRITE;
				break;
			}
	}

	job->fn = fn;
	job->context = context;
	job->master_job = job;
	job->write_offset = 0;

	if (job->source.count <= kc->sub_job_size)
		dispatch_job(job);
	else {
		job->progress = 0;
		split_job(job);
	}
}
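For context, dm_kcopyd_copy() is the public entry point device-mapper targets call. A minimal, hypothetical caller sketch (kernel-side code, not a standalone program; copy_one_extent and my_copy_done are invented names, and error handling is elided):

/* Hypothetical caller sketch: copy one extent to one destination and
 * wait for the completion callback. */
#include <linux/dm-kcopyd.h>
#include <linux/dm-io.h>
#include <linux/completion.h>

static void my_copy_done(int read_err, unsigned long write_err, void *context)
{
	/* read_err is boolean; write_err has one bit per failed dest. */
	complete((struct completion *)context);
}

static int copy_one_extent(struct dm_kcopyd_client *kc,
			   struct block_device *src, struct block_device *dst,
			   sector_t sector, sector_t count)
{
	struct dm_io_region from = { .bdev = src, .sector = sector, .count = count };
	struct dm_io_region to   = { .bdev = dst, .sector = sector, .count = count };
	DECLARE_COMPLETION_ONSTACK(done);

	dm_kcopyd_copy(kc, &from, 1, &to, 0 /* flags */, my_copy_done, &done);
	wait_for_completion(&done);
	return 0;
}

Passing NULL as from turns the call into a zero-fill of the destinations, which is the zero_page_list / REQ_OP_WRITE_ZEROES path above. (In older kernels dm_kcopyd_copy() returned int; it is void here.)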
void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
				 dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	job = mempool_alloc(&kc->job_pool, GFP_NOIO);

	memset(job, 0, sizeof(struct kcopyd_job));
	job->kc = kc;
	job->fn = fn;
	job->context = context;
	job->master_job = job;

	atomic_inc(&kc->nr_jobs);

	return job;
}
void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
{
	struct kcopyd_job *job = j;
	struct dm_kcopyd_client *kc = job->kc;

	job->read_err = read_err;
	job->write_err = write_err;

	push(&kc->callback_jobs, job);
	wake(kc);
}
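The prepare/do pair lets a target borrow kcopyd's callback thread for work that involves no copying: the allocation, which may sleep, happens up front, and the trigger can then fire later from a context that must not sleep (dm-snapshot uses this for completing exceptions). A hypothetical sketch of the pairing, with an invented example() caller:

/* Hypothetical sketch of the prepare/do_callback pairing. */
static void deferred_done(int read_err, unsigned long write_err, void *context)
{
	/* Runs on kcopyd's thread, not in the trigger's context. */
}

static void example(struct dm_kcopyd_client *kc)
{
	/* May sleep: do this while sleeping is still allowed. */
	void *token = dm_kcopyd_prepare_callback(kc, deferred_done, NULL);

	/* ... later, perhaps from a bio completion path ... */
	dm_kcopyd_do_callback(token, 0 /* read_err */, 0 /* write_err */);
}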
/*
 * Cancels a kcopyd job, eg. someone might be deactivating a mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif