Lines Matching refs:job

These are the cross-reference hits for the symbol job in drivers/md/dm-kcopyd.c, device-mapper's kcopyd copy daemon. The leading number on each hit is the source line number; the trailing annotation names the enclosing function, with "local" and "argument" marking declaration sites.

420 struct kcopyd_job *job;  in pop_io_job()  local
426 list_for_each_entry(job, jobs, list) { in pop_io_job()
427 if (job->op == REQ_OP_READ || in pop_io_job()
428 !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) { in pop_io_job()
429 list_del(&job->list); in pop_io_job()
430 return job; in pop_io_job()
433 if (job->write_offset == job->master_job->write_offset) { in pop_io_job()
434 job->master_job->write_offset += job->source.count; in pop_io_job()
435 list_del(&job->list); in pop_io_job()
436 return job; in pop_io_job()
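
Lines 420-436 cover almost all of pop_io_job(). A sketch of the full function, reconstructed from the fragments above; the signature and the final return NULL are assumptions based on mainline dm-kcopyd.c:

static struct kcopyd_job *pop_io_job(struct list_head *jobs,
                                     struct dm_kcopyd_client *kc)
{
        struct kcopyd_job *job;

        /*
         * Pop any read, and any write that is not flagged
         * DM_KCOPYD_WRITE_SEQ. A sequential write is only eligible
         * when it is next in line, i.e. its offset matches the master
         * job's write cursor.
         */
        list_for_each_entry(job, jobs, list) {
                if (job->op == REQ_OP_READ ||
                    !(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) {
                        list_del(&job->list);
                        return job;
                }

                if (job->write_offset == job->master_job->write_offset) {
                        /* Advance the shared cursor for the next sub-job. */
                        job->master_job->write_offset += job->source.count;
                        list_del(&job->list);
                        return job;
                }
        }

        return NULL;
}

This ordering constraint is what lets kcopyd write to zoned block devices, where writes within a zone must be issued sequentially.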
446 struct kcopyd_job *job = NULL; in pop() local
452 job = pop_io_job(jobs, kc); in pop()
454 job = list_entry(jobs->next, struct kcopyd_job, list); in pop()
455 list_del(&job->list); in pop()
460 return job; in pop()
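
pop() (lines 446-460) is the common dequeue: the io_jobs queue goes through pop_io_job() to honor write ordering, while every other queue is plain FIFO. A sketch, with the kc->job_lock spinlocking assumed from mainline:

static struct kcopyd_job *pop(struct list_head *jobs,
                              struct dm_kcopyd_client *kc)
{
        struct kcopyd_job *job = NULL;

        spin_lock_irq(&kc->job_lock);   /* assumed: queues shared with irq context */

        if (!list_empty(jobs)) {
                if (jobs == &kc->io_jobs)
                        job = pop_io_job(jobs, kc);
                else {
                        job = list_entry(jobs->next, struct kcopyd_job, list);
                        list_del(&job->list);
                }
        }

        spin_unlock_irq(&kc->job_lock);
        return job;
}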
463 static void push(struct list_head *jobs, struct kcopyd_job *job) in push() argument
466 struct dm_kcopyd_client *kc = job->kc; in push()
469 list_add_tail(&job->list, jobs); in push()
474 static void push_head(struct list_head *jobs, struct kcopyd_job *job) in push_head() argument
476 struct dm_kcopyd_client *kc = job->kc; in push_head()
479 list_add(&job->list, jobs); in push_head()
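
push() and push_head() (lines 463-479) differ only in which end of the queue they touch: list_add_tail() keeps FIFO order for new work, while list_add() puts a job back at the front so a temporarily unserviceable job is retried first. A sketch; the exact spinlock variants are assumptions:

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
        unsigned long flags;
        struct dm_kcopyd_client *kc = job->kc;

        spin_lock_irqsave(&kc->job_lock, flags);
        list_add_tail(&job->list, jobs);        /* FIFO: run after existing work */
        spin_unlock_irqrestore(&kc->job_lock, flags);
}

static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
        unsigned long flags;
        struct dm_kcopyd_client *kc = job->kc;

        spin_lock_irqsave(&kc->job_lock, flags);
        list_add(&job->list, jobs);             /* LIFO: retry this job first */
        spin_unlock_irqrestore(&kc->job_lock, flags);
}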
492 static int run_complete_job(struct kcopyd_job *job) in run_complete_job() argument
494 void *context = job->context; in run_complete_job()
495 int read_err = job->read_err; in run_complete_job()
496 unsigned long write_err = job->write_err; in run_complete_job()
497 dm_kcopyd_notify_fn fn = job->fn; in run_complete_job()
498 struct dm_kcopyd_client *kc = job->kc; in run_complete_job()
500 if (job->pages && job->pages != &zero_page_list) in run_complete_job()
501 kcopyd_put_pages(kc, job->pages); in run_complete_job()
506 if (job->master_job == job) { in run_complete_job()
507 mutex_destroy(&job->lock); in run_complete_job()
508 mempool_free(job, &kc->job_pool); in run_complete_job()
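
run_complete_job() (lines 492-508) copies every field it still needs into locals before freeing anything, because the master job may be returned to the mempool before the notify callback runs. A sketch; the callback invocation and the client bookkeeping at the end are assumptions based on mainline:

static int run_complete_job(struct kcopyd_job *job)
{
        void *context = job->context;
        int read_err = job->read_err;
        unsigned long write_err = job->write_err;
        dm_kcopyd_notify_fn fn = job->fn;
        struct dm_kcopyd_client *kc = job->kc;

        if (job->pages && job->pages != &zero_page_list)
                kcopyd_put_pages(kc, job->pages);

        /* Only the master job owns the mutex and the mempool element. */
        if (job->master_job == job) {
                mutex_destroy(&job->lock);
                mempool_free(job, &kc->job_pool);
        }

        /* Notify with the values captured before the free. */
        fn(read_err, write_err, context);

        /* (Mainline also decrements the client's job count here and
         * wakes anyone waiting in client destruction.) */
        return 0;
}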
522 struct kcopyd_job *job = (struct kcopyd_job *) context; in complete_io() local
523 struct dm_kcopyd_client *kc = job->kc; in complete_io()
528 if (op_is_write(job->op)) in complete_io()
529 job->write_err |= error; in complete_io()
531 job->read_err = 1; in complete_io()
533 if (!(job->flags & BIT(DM_KCOPYD_IGNORE_ERROR))) { in complete_io()
534 push(&kc->complete_jobs, job); in complete_io()
540 if (op_is_write(job->op)) in complete_io()
541 push(&kc->complete_jobs, job); in complete_io()
544 job->op = REQ_OP_WRITE; in complete_io()
545 push(&kc->io_jobs, job); in complete_io()
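
complete_io() (lines 522-545) is the dm_io completion callback and carries the read-then-write state machine: when the read half of a copy finishes, the same job is re-labelled REQ_OP_WRITE and pushed back onto io_jobs; when the write half finishes, the job moves to complete_jobs. A sketch; wake() as the worker kick is an assumption:

static void complete_io(unsigned long error, void *context)
{
        struct kcopyd_job *job = (struct kcopyd_job *) context;
        struct dm_kcopyd_client *kc = job->kc;

        if (error) {
                if (op_is_write(job->op))
                        job->write_err |= error;        /* bitset of failed dests */
                else
                        job->read_err = 1;

                /* Unless told to ignore errors, fail the job now. */
                if (!(job->flags & BIT(DM_KCOPYD_IGNORE_ERROR))) {
                        push(&kc->complete_jobs, job);
                        wake(kc);
                        return;
                }
        }

        if (op_is_write(job->op))
                push(&kc->complete_jobs, job);
        else {
                /* Read done: reuse the job as the write half of the copy. */
                job->op = REQ_OP_WRITE;
                push(&kc->io_jobs, job);
        }

        wake(kc);
}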
555 static int run_io_job(struct kcopyd_job *job) in run_io_job() argument
559 .bi_opf = job->op, in run_io_job()
561 .mem.ptr.pl = job->pages, in run_io_job()
564 .notify.context = job, in run_io_job()
565 .client = job->kc->io_client, in run_io_job()
572 if (job->flags & BIT(DM_KCOPYD_WRITE_SEQ) && in run_io_job()
573 job->master_job->write_err) { in run_io_job()
574 job->write_err = job->master_job->write_err; in run_io_job()
578 io_job_start(job->kc->throttle); in run_io_job()
580 if (job->op == REQ_OP_READ) in run_io_job()
581 r = dm_io(&io_req, 1, &job->source, NULL); in run_io_job()
583 r = dm_io(&io_req, job->num_dests, job->dests, NULL); in run_io_job()
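
run_io_job() (lines 555-583) packs the job into a dm_io_request and submits it asynchronously; the NULL final argument to dm_io() selects the notify path (complete_io above) instead of a synchronous error bitmap. A sketch; the .mem.type and .notify.fn initializers, which the listing does not show, are assumed from mainline:

static int run_io_job(struct kcopyd_job *job)
{
        int r;
        struct dm_io_request io_req = {
                .bi_opf = job->op,
                .mem.type = DM_IO_PAGE_LIST,    /* assumed */
                .mem.ptr.pl = job->pages,
                .mem.offset = 0,
                .notify.fn = complete_io,       /* assumed */
                .notify.context = job,
                .client = job->kc->io_client,
        };

        /*
         * With sequential writes there is no point continuing once any
         * part of the master job has already failed.
         */
        if (job->flags & BIT(DM_KCOPYD_WRITE_SEQ) &&
            job->master_job->write_err) {
                job->write_err = job->master_job->write_err;
                return -EIO;
        }

        io_job_start(job->kc->throttle);

        if (job->op == REQ_OP_READ)
                r = dm_io(&io_req, 1, &job->source, NULL);
        else
                r = dm_io(&io_req, job->num_dests, job->dests, NULL);

        return r;
}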
588 static int run_pages_job(struct kcopyd_job *job) in run_pages_job() argument
591 unsigned int nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9); in run_pages_job()
593 r = kcopyd_get_pages(job->kc, nr_pages, &job->pages); in run_pages_job()
596 push(&job->kc->io_jobs, job); in run_pages_job()
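
run_pages_job() (lines 588-596) reserves the bounce pages for a copy; dm_div_up() rounds the destination size, given in 512-byte sectors, up to whole pages (PAGE_SIZE >> 9 is the page size in sectors). A sketch; the -ENOMEM handling is assumed from mainline:

static int run_pages_job(struct kcopyd_job *job)
{
        int r;
        unsigned int nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);

        r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
        if (!r) {
                /* Pages reserved: the job is ready for I/O. */
                push(&job->kc->io_jobs, job);
                return 0;
        }

        if (r == -ENOMEM)
                return 1;       /* positive: retry later, see process_jobs() */

        return r;
}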
614 struct kcopyd_job *job; in process_jobs() local
617 while ((job = pop(jobs, kc))) { in process_jobs()
619 r = fn(job); in process_jobs()
623 if (op_is_write(job->op)) in process_jobs()
624 job->write_err = (unsigned long) -1L; in process_jobs()
626 job->read_err = 1; in process_jobs()
627 push(&kc->complete_jobs, job); in process_jobs()
637 push_head(jobs, job); in process_jobs()
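
process_jobs() (lines 614-637) drains one queue through a stage function (run_pages_job, run_io_job or run_complete_job) and interprets its result: negative fails the job onto complete_jobs, positive means the job cannot be serviced right now, in which case push_head() requeues it at the front so queue order is preserved. A sketch reconstructed from the fragments:

static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
                        int (*fn)(struct kcopyd_job *))
{
        struct kcopyd_job *job;
        int r, count = 0;

        while ((job = pop(jobs, kc))) {
                r = fn(job);

                if (r < 0) {
                        /* Hard error: fail the job via the completion queue. */
                        if (op_is_write(job->op))
                                job->write_err = (unsigned long) -1L;
                        else
                                job->read_err = 1;
                        push(&kc->complete_jobs, job);
                        break;
                }

                if (r > 0) {
                        /* Temporary failure: retry this job first next time. */
                        push_head(jobs, job);
                        break;
                }

                count++;
        }

        return count;
}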
679 static void dispatch_job(struct kcopyd_job *job) in dispatch_job() argument
681 struct dm_kcopyd_client *kc = job->kc; in dispatch_job()
684 if (unlikely(!job->source.count)) in dispatch_job()
685 push(&kc->callback_jobs, job); in dispatch_job()
686 else if (job->pages == &zero_page_list) in dispatch_job()
687 push(&kc->io_jobs, job); in dispatch_job()
689 push(&kc->pages_jobs, job); in dispatch_job()
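
dispatch_job() (lines 679-689) routes a freshly built job: a zero-length source goes straight to callback_jobs (nothing to copy, just notify), a zero-fill job whose pages already point at zero_page_list skips page allocation and goes to io_jobs, and an ordinary copy starts on pages_jobs. A sketch; the job accounting and wake() are assumptions:

static void dispatch_job(struct kcopyd_job *job)
{
        struct dm_kcopyd_client *kc = job->kc;

        atomic_inc(&kc->nr_jobs);                       /* assumed */
        if (unlikely(!job->source.count))
                push(&kc->callback_jobs, job);          /* nothing to copy */
        else if (job->pages == &zero_page_list)
                push(&kc->io_jobs, job);                /* zeroing: no read phase */
        else
                push(&kc->pages_jobs, job);             /* copy: get pages first */
        wake(kc);
}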
700 struct kcopyd_job *job = sub_job->master_job; in segment_complete() local
701 struct dm_kcopyd_client *kc = job->kc; in segment_complete()
703 mutex_lock(&job->lock); in segment_complete()
707 job->read_err = 1; in segment_complete()
710 job->write_err |= write_err; in segment_complete()
715 if ((!job->read_err && !job->write_err) || in segment_complete()
716 job->flags & BIT(DM_KCOPYD_IGNORE_ERROR)) { in segment_complete()
718 progress = job->progress; in segment_complete()
719 count = job->source.count - progress; in segment_complete()
724 job->progress += count; in segment_complete()
727 mutex_unlock(&job->lock); in segment_complete()
732 *sub_job = *job; in segment_complete()
737 for (i = 0; i < job->num_dests; i++) { in segment_complete()
746 } else if (atomic_dec_and_test(&job->sub_jobs)) { in segment_complete()
757 push(&kc->complete_jobs, job); in segment_complete()
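
segment_complete() (lines 700-757) is how a large copy is carved into sub-jobs: under the master job's mutex it records the finished sub-job's errors, claims the next chunk of source.count by advancing job->progress, and, once atomic_dec_and_test() retires the last sub-job, queues the master on complete_jobs. A condensed sketch; the sub-job cloning and re-dispatch in the middle are elided:

static void segment_complete(int read_err, unsigned long write_err,
                             void *context)
{
        sector_t progress = 0;
        sector_t count = 0;
        struct kcopyd_job *sub_job = context;
        struct kcopyd_job *job = sub_job->master_job;
        struct dm_kcopyd_client *kc = job->kc;

        mutex_lock(&job->lock);

        /* Propagate this sub-job's errors to the master. */
        if (read_err)
                job->read_err = 1;
        if (write_err)
                job->write_err |= write_err;

        /* Claim the next chunk unless the copy has already failed. */
        if ((!job->read_err && !job->write_err) ||
            job->flags & BIT(DM_KCOPYD_IGNORE_ERROR)) {
                progress = job->progress;
                count = job->source.count - progress;
                if (count) {
                        if (count > kc->sub_job_size)
                                count = kc->sub_job_size;
                        job->progress += count;
                }
        }
        mutex_unlock(&job->lock);

        if (count) {
                /* Clone *job into *sub_job, shift source/dests by
                 * progress, trim to count, and dispatch it (elided). */
        } else if (atomic_dec_and_test(&job->sub_jobs)) {
                /* Last sub-job retired: complete the master. */
                push(&kc->complete_jobs, job);
                wake(kc);
        }
}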
782 struct kcopyd_job *job; in dm_kcopyd_copy() local
789 job = mempool_alloc(&kc->job_pool, GFP_NOIO); in dm_kcopyd_copy()
790 mutex_init(&job->lock); in dm_kcopyd_copy()
795 job->kc = kc; in dm_kcopyd_copy()
796 job->flags = flags; in dm_kcopyd_copy()
797 job->read_err = 0; in dm_kcopyd_copy()
798 job->write_err = 0; in dm_kcopyd_copy()
800 job->num_dests = num_dests; in dm_kcopyd_copy()
801 memcpy(&job->dests, dests, sizeof(*dests) * num_dests); in dm_kcopyd_copy()
808 if (!(job->flags & BIT(DM_KCOPYD_WRITE_SEQ))) { in dm_kcopyd_copy()
809 for (i = 0; i < job->num_dests; i++) { in dm_kcopyd_copy()
811 job->flags |= BIT(DM_KCOPYD_WRITE_SEQ); in dm_kcopyd_copy()
820 if (job->flags & BIT(DM_KCOPYD_WRITE_SEQ) && in dm_kcopyd_copy()
821 job->flags & BIT(DM_KCOPYD_IGNORE_ERROR)) in dm_kcopyd_copy()
822 job->flags &= ~BIT(DM_KCOPYD_IGNORE_ERROR); in dm_kcopyd_copy()
825 job->source = *from; in dm_kcopyd_copy()
826 job->pages = NULL; in dm_kcopyd_copy()
827 job->op = REQ_OP_READ; in dm_kcopyd_copy()
829 memset(&job->source, 0, sizeof(job->source)); in dm_kcopyd_copy()
830 job->source.count = job->dests[0].count; in dm_kcopyd_copy()
831 job->pages = &zero_page_list; in dm_kcopyd_copy()
836 job->op = REQ_OP_WRITE_ZEROES; in dm_kcopyd_copy()
837 for (i = 0; i < job->num_dests; i++) in dm_kcopyd_copy()
838 if (!bdev_write_zeroes_sectors(job->dests[i].bdev)) { in dm_kcopyd_copy()
839 job->op = REQ_OP_WRITE; in dm_kcopyd_copy()
844 job->fn = fn; in dm_kcopyd_copy()
845 job->context = context; in dm_kcopyd_copy()
846 job->master_job = job; in dm_kcopyd_copy()
847 job->write_offset = 0; in dm_kcopyd_copy()
849 if (job->source.count <= kc->sub_job_size) in dm_kcopyd_copy()
850 dispatch_job(job); in dm_kcopyd_copy()
852 job->progress = 0; in dm_kcopyd_copy()
853 split_job(job); in dm_kcopyd_copy()
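
dm_kcopyd_copy() (lines 782-853) is the public entry point. It allocates the master job from the mempool, forces DM_KCOPYD_WRITE_SEQ when a destination is a zoned device that requires sequential writes, drops DM_KCOPYD_IGNORE_ERROR when that flag is also set (the two are incompatible, since sequential ordering cannot continue past an error), takes the zero-fill path when from is NULL, and finally dispatches directly or splits once source.count exceeds kc->sub_job_size. A hypothetical caller, to show the API shape; the helper names and region sizes are made up, and the signature (returning void) matches recent mainline:

static void copy_done(int read_err, unsigned long write_err, void *context)
{
        /* read_err is boolean; write_err is a bitmask of failed dests. */
        complete((struct completion *)context);
}

static void copy_one_extent(struct dm_kcopyd_client *kc,
                            struct block_device *src,
                            struct block_device *dst)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct dm_io_region from = {
                .bdev = src,
                .sector = 0,
                .count = 1024,          /* 512 KiB, in 512-byte sectors */
        };
        struct dm_io_region to = {
                .bdev = dst,
                .sector = 0,
                .count = 1024,
        };

        dm_kcopyd_copy(kc, &from, 1, &to, 0 /* flags */, copy_done, &done);
        wait_for_completion(&done);
}

Passing a NULL from region with the same call exercises the zeroing path seen at lines 829-839 instead.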
869 struct kcopyd_job *job; in dm_kcopyd_prepare_callback() local
871 job = mempool_alloc(&kc->job_pool, GFP_NOIO); in dm_kcopyd_prepare_callback()
873 memset(job, 0, sizeof(struct kcopyd_job)); in dm_kcopyd_prepare_callback()
874 job->kc = kc; in dm_kcopyd_prepare_callback()
875 job->fn = fn; in dm_kcopyd_prepare_callback()
876 job->context = context; in dm_kcopyd_prepare_callback()
877 job->master_job = job; in dm_kcopyd_prepare_callback()
881 return job; in dm_kcopyd_prepare_callback()
887 struct kcopyd_job *job = j; in dm_kcopyd_do_callback() local
888 struct dm_kcopyd_client *kc = job->kc; in dm_kcopyd_do_callback()
890 job->read_err = read_err; in dm_kcopyd_do_callback()
891 job->write_err = write_err; in dm_kcopyd_do_callback()
893 push(&kc->callback_jobs, job); in dm_kcopyd_do_callback()
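
dm_kcopyd_prepare_callback() and dm_kcopyd_do_callback() (lines 869-893) let a caller route an arbitrary completion through kcopyd's ordered callback_jobs queue without performing a copy: the prepared job is zeroed, made its own master job, and later pushed with the caller's error values. A minimal usage sketch; the surrounding names are hypothetical:

/* Step 1: reserve the callback job while sleeping is still allowed
 * (the allocation uses GFP_NOIO). */
void *cb = dm_kcopyd_prepare_callback(kc, my_notify_fn, my_context);

/* ... issue other I/O ... */

/* Step 2: queue it, e.g. from a completion path; my_notify_fn then
 * runs in kcopyd's worker context, ordered after earlier jobs. */
dm_kcopyd_do_callback(cb, 0 /* read_err */, 0 /* write_err */);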
903 int kcopyd_cancel(struct kcopyd_job *job, int block)
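
That last hit (line 903) is the definition line of kcopyd_cancel() itself, which is why it carries no "in ...()" annotation. In mainline this definition sits inside an #if 0 block; job cancellation was never implemented:

#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
        /* FIXME: finish */
        return -1;
}
#endif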