Lines matching refs:percpu (drivers/md/raid5.c)

1526 static struct page **to_addr_page(struct raid5_percpu *percpu, int i)  in to_addr_page()  argument
1528 return percpu->scribble + i * percpu->scribble_obj_size; in to_addr_page()
1533 struct raid5_percpu *percpu, int i) in to_addr_conv() argument
1535 return (void *) (to_addr_page(percpu, i) + sh->disks + 2); in to_addr_conv()
1542 to_addr_offs(struct stripe_head *sh, struct raid5_percpu *percpu) in to_addr_offs() argument
1544 return (unsigned int *) (to_addr_conv(sh, percpu, 0) + sh->disks + 2); in to_addr_offs()
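
The three to_addr_*() helpers above carve each scribble object into three consecutive regions, each sized for sh->disks + 2 entries: page pointers, address-conversion space, and per-page offsets (the offsets are always taken from object 0, per to_addr_offs()). A small userspace model of that pointer arithmetic, assuming stand-in types (addr_conv_t as void *, an opaque struct page) rather than the kernel's definitions:

#include <stdio.h>
#include <stdlib.h>

typedef void *addr_conv_t;      /* stand-in; the real type is an async_tx detail */
struct page;                    /* opaque, only pointers to it are stored */

/* One scribble object, laid out as scribble_alloc() sizes it:
 * (disks + 2) page pointers | (disks + 2) addr_conv_t | (disks + 2) offsets */
static size_t obj_size(int disks)
{
        return sizeof(struct page *) * (disks + 2) +
               sizeof(addr_conv_t) * (disks + 2) +
               sizeof(unsigned int) * (disks + 2);
}

static struct page **obj_pages(void *scribble, size_t objsz, int i)
{
        return (struct page **)((char *)scribble + i * objsz);         /* to_addr_page() */
}

static addr_conv_t *obj_addr_conv(void *scribble, size_t objsz, int i, int disks)
{
        return (addr_conv_t *)(obj_pages(scribble, objsz, i) + disks + 2);      /* to_addr_conv() */
}

static unsigned int *obj_offs(void *scribble, size_t objsz, int disks)
{
        /* offsets always come from object 0, as in to_addr_offs() */
        return (unsigned int *)(obj_addr_conv(scribble, objsz, 0, disks) + disks + 2);
}

int main(void)
{
        int disks = 8, objs = 2;
        size_t objsz = obj_size(disks);
        void *scribble = calloc(objs, objsz);

        if (!scribble)
                return 1;
        printf("object size %zu, offsets begin at byte %zu of object 0\n",
               objsz, (size_t)((char *)obj_offs(scribble, objsz, disks) - (char *)scribble));
        free(scribble);
        return 0;
}
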
1548 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute5() argument
1551 struct page **xor_srcs = to_addr_page(percpu, 0); in ops_run_compute5()
1552 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_compute5()
1578 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); in ops_run_compute5()
1645 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_1() argument
1648 struct page **blocks = to_addr_page(percpu, 0); in ops_run_compute6_1()
1649 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_compute6_1()
1685 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1700 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1709 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_2() argument
1720 struct page **blocks = to_addr_page(percpu, 0); in ops_run_compute6_2()
1721 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_compute6_2()
1767 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1795 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1803 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1811 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1844 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor5() argument
1848 struct page **xor_srcs = to_addr_page(percpu, 0); in ops_run_prexor5()
1849 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_prexor5()
1878 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor5()
1886 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor6() argument
1889 struct page **blocks = to_addr_page(percpu, 0); in ops_run_prexor6()
1890 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_prexor6()
1900 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor6()
2027 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct5() argument
2060 xor_srcs = to_addr_page(percpu, j); in ops_run_reconstruct5()
2061 off_srcs = to_addr_offs(sh, percpu); in ops_run_reconstruct5()
2103 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
2107 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
2125 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct6() argument
2154 blocks = to_addr_page(percpu, j); in ops_run_reconstruct6()
2155 offs = to_addr_offs(sh, percpu); in ops_run_reconstruct6()
2173 head_sh, to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
2176 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
2199 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_check_p() argument
2206 struct page **xor_srcs = to_addr_page(percpu, 0); in ops_run_check_p()
2207 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_check_p()
2230 to_addr_conv(sh, percpu, 0)); in ops_run_check_p()
2240 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) in ops_run_check_pq() argument
2242 struct page **srcs = to_addr_page(percpu, 0); in ops_run_check_pq()
2243 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_check_pq()
2257 sh, to_addr_conv(sh, percpu, 0)); in ops_run_check_pq()
2260 &sh->ops.zero_sum_result, percpu->spare_page, 0, &submit); in ops_run_check_pq()
2269 struct raid5_percpu *percpu; in raid_run_ops() local
2271 local_lock(&conf->percpu->lock); in raid_run_ops()
2272 percpu = this_cpu_ptr(conf->percpu); in raid_run_ops()
2280 tx = ops_run_compute5(sh, percpu); in raid_run_ops()
2283 tx = ops_run_compute6_1(sh, percpu); in raid_run_ops()
2285 tx = ops_run_compute6_2(sh, percpu); in raid_run_ops()
2294 tx = ops_run_prexor5(sh, percpu, tx); in raid_run_ops()
2296 tx = ops_run_prexor6(sh, percpu, tx); in raid_run_ops()
2300 tx = ops_run_partial_parity(sh, percpu, tx); in raid_run_ops()
2309 ops_run_reconstruct5(sh, percpu, tx); in raid_run_ops()
2311 ops_run_reconstruct6(sh, percpu, tx); in raid_run_ops()
2316 ops_run_check_p(sh, percpu); in raid_run_ops()
2318 ops_run_check_pq(sh, percpu, 0); in raid_run_ops()
2320 ops_run_check_pq(sh, percpu, 1); in raid_run_ops()
2332 local_unlock(&conf->percpu->lock); in raid_run_ops()
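
raid_run_ops() (2269-2332) brackets all of the ops above with a per-CPU local lock: take conf->percpu->lock, resolve this CPU's raid5_percpu with this_cpu_ptr(), run the scheduled ops, then unlock. A stripped-down sketch of that access pattern, with hypothetical demo_* names in place of the driver's structures:

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Hypothetical stand-in for struct raid5_percpu. */
struct demo_percpu {
        local_lock_t lock;      /* assumed local_lock_init()'d per CPU at alloc time,
                                 * as alloc_scratch_buffer() does at 7317 */
        void *scribble;         /* scratch that must never be used cross-CPU */
};

static struct demo_percpu __percpu *demo_pcpu;

static void demo_run(void)
{
        struct demo_percpu *percpu;

        /* Same shape as raid_run_ops(): the local lock pins the task to this
         * CPU's instance (preemption or migration disabled depending on the
         * config), so the scratch buffers cannot be used concurrently. */
        local_lock(&demo_pcpu->lock);
        percpu = this_cpu_ptr(demo_pcpu);

        /* ... build source lists in percpu->scribble and submit async ops ... */

        local_unlock(&demo_pcpu->lock);
}
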
2446 static int scribble_alloc(struct raid5_percpu *percpu, in scribble_alloc() argument
2464 kvfree(percpu->scribble); in scribble_alloc()
2466 percpu->scribble = scribble; in scribble_alloc()
2467 percpu->scribble_obj_size = obj_size; in scribble_alloc()
2485 struct raid5_percpu *percpu; in resize_chunks() local
2487 percpu = per_cpu_ptr(conf->percpu, cpu); in resize_chunks()
2488 err = scribble_alloc(percpu, new_disks, in resize_chunks()
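
resize_chunks() (2485-2488) regrows every CPU's scribble buffer by visiting each instance with per_cpu_ptr() and calling scribble_alloc() for the new geometry. A hedged sketch of such a walk; the CPU iterator and hotplug locking shown here are generic assumptions, only the per_cpu_ptr() lookup and the kvmalloc_array()-style reallocation follow the listing:

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/slab.h>

struct demo_percpu { void *scribble; };         /* minimal stand-in */

/* Assumed helper in the mold of scribble_alloc(): allocate for the new
 * geometry first, then swap it in and free the old buffer. */
static int demo_realloc(struct demo_percpu *p, size_t new_obj_size, int objs)
{
        void *scribble = kvmalloc_array(objs, new_obj_size, GFP_KERNEL);

        if (!scribble)
                return -ENOMEM;
        kvfree(p->scribble);
        p->scribble = scribble;
        return 0;
}

static int demo_resize(struct demo_percpu __percpu *pcpu, size_t new_obj_size, int objs)
{
        int cpu, err = 0;

        cpus_read_lock();                       /* keep the CPU set stable for the walk */
        for_each_present_cpu(cpu) {
                struct demo_percpu *p = per_cpu_ptr(pcpu, cpu);

                err = demo_realloc(p, new_obj_size, objs);
                if (err)
                        break;
        }
        cpus_read_unlock();
        return err;
}
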
7291 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in free_scratch_buffer() argument
7293 safe_put_page(percpu->spare_page); in free_scratch_buffer()
7294 percpu->spare_page = NULL; in free_scratch_buffer()
7295 kvfree(percpu->scribble); in free_scratch_buffer()
7296 percpu->scribble = NULL; in free_scratch_buffer()
7299 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in alloc_scratch_buffer() argument
7301 if (conf->level == 6 && !percpu->spare_page) { in alloc_scratch_buffer()
7302 percpu->spare_page = alloc_page(GFP_KERNEL); in alloc_scratch_buffer()
7303 if (!percpu->spare_page) in alloc_scratch_buffer()
7307 if (scribble_alloc(percpu, in alloc_scratch_buffer()
7313 free_scratch_buffer(conf, percpu); in alloc_scratch_buffer()
7317 local_lock_init(&percpu->lock); in alloc_scratch_buffer()
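
free_scratch_buffer()/alloc_scratch_buffer() (7291-7317) give each CPU's scratch its lifecycle: a spare page allocated only when needed (gated on level 6 in the listing), the scribble allocation, a full unwind through the free path when anything fails, and local_lock_init() at the end. safe_put_page() is an md-internal helper that put_page()s only non-NULL pages. A minimal sketch of that allocate-or-unwind shape, with hypothetical demo_* names:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/local_lock.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_percpu {
        local_lock_t lock;
        struct page *spare_page;
        void *scribble;
};

static void demo_free_scratch(struct demo_percpu *p)
{
        if (p->spare_page)
                put_page(p->spare_page);        /* what safe_put_page() amounts to */
        p->spare_page = NULL;
        kvfree(p->scribble);
        p->scribble = NULL;
}

static int demo_alloc_scratch(struct demo_percpu *p, bool need_spare, size_t scribble_bytes)
{
        void *scribble;

        if (need_spare && !p->spare_page) {
                p->spare_page = alloc_page(GFP_KERNEL);
                if (!p->spare_page)
                        return -ENOMEM;
        }

        scribble = kvmalloc(scribble_bytes, GFP_KERNEL);
        if (!scribble) {
                /* Unwind everything allocated so far, as alloc_scratch_buffer() does. */
                demo_free_scratch(p);
                return -ENOMEM;
        }
        kvfree(p->scribble);                    /* replace any previous buffer */
        p->scribble = scribble;

        local_lock_init(&p->lock);
        return 0;
}
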
7325 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid456_cpu_dead()
7331 if (!conf->percpu) in raid5_free_percpu()
7335 free_percpu(conf->percpu); in raid5_free_percpu()
7361 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); in raid456_cpu_up_prepare() local
7363 if (alloc_scratch_buffer(conf, percpu)) { in raid456_cpu_up_prepare()
7375 conf->percpu = alloc_percpu(struct raid5_percpu); in raid5_alloc_percpu()
7376 if (!conf->percpu) in raid5_alloc_percpu()
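
raid5_alloc_percpu() (7375-7376) allocates the whole per-CPU array with alloc_percpu(); each CPU's scratch is then populated and torn down through the raid456_cpu_up_prepare()/raid456_cpu_dead() hotplug callbacks above, and raid5_free_percpu() drops the array with free_percpu(). A simplified lifecycle sketch using a dynamically allocated single-instance cpuhp state; the driver itself registers per-array instances through the multi-instance hotplug API under its own CPUHP state, and all demo_* names are hypothetical:

#include <linux/cpuhotplug.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#define DEMO_SCRATCH_BYTES 4096

struct demo_percpu { void *scribble; };         /* minimal stand-in */

static struct demo_percpu __percpu *demo_pcpu;
static enum cpuhp_state demo_hp_state;

static int demo_cpu_up_prepare(unsigned int cpu)
{
        struct demo_percpu *p = per_cpu_ptr(demo_pcpu, cpu);

        /* Allocate this CPU's scratch, as raid456_cpu_up_prepare() does
         * via alloc_scratch_buffer(). */
        p->scribble = kvmalloc(DEMO_SCRATCH_BYTES, GFP_KERNEL);
        return p->scribble ? 0 : -ENOMEM;
}

static int demo_cpu_dead(unsigned int cpu)
{
        struct demo_percpu *p = per_cpu_ptr(demo_pcpu, cpu);

        /* Mirror raid456_cpu_dead(): free the dead CPU's scratch. */
        kvfree(p->scribble);
        p->scribble = NULL;
        return 0;
}

static int demo_init(void)
{
        int ret;

        demo_pcpu = alloc_percpu(struct demo_percpu);
        if (!demo_pcpu)
                return -ENOMEM;

        /* The startup callback also runs once for every CPU already online. */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:prepare",
                                demo_cpu_up_prepare, demo_cpu_dead);
        if (ret < 0) {
                free_percpu(demo_pcpu);
                return ret;
        }
        demo_hp_state = ret;
        return 0;
}

static void demo_exit(void)
{
        cpuhp_remove_state(demo_hp_state);      /* runs demo_cpu_dead() on each online CPU */
        free_percpu(demo_pcpu);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
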