Lines matching refs:percpu - references to struct raid5_percpu and its per-CPU scratch resources (scribble, spare_page, lock) in the md raid5/6 driver (drivers/md/raid5.c). Each hit shows the source line number, the matching line, and the function it falls in; the trailing "argument"/"local" tag is the reference kind reported by the indexer.

1537 static struct page **to_addr_page(struct raid5_percpu *percpu, int i)  in to_addr_page()  argument
1539 return percpu->scribble + i * percpu->scribble_obj_size; in to_addr_page()
1544 struct raid5_percpu *percpu, int i) in to_addr_conv() argument
1546 return (void *) (to_addr_page(percpu, i) + sh->disks + 2); in to_addr_conv()
1553 to_addr_offs(struct stripe_head *sh, struct raid5_percpu *percpu) in to_addr_offs() argument
1555 return (unsigned int *) (to_addr_conv(sh, percpu, 0) + sh->disks + 2); in to_addr_offs()
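
The three helpers above carve each per-CPU scribble object into three consecutive arrays: disks + 2 page pointers, disks + 2 addr_conv_t slots that async_tx uses as scratch space, and disks + 2 per-page offsets. A minimal sketch of that decomposition (an illustrative helper, not part of the driver; addr_conv_t comes from the async_tx API):

	/*
	 * Illustrative decomposition of the i-th scribble object into the
	 * three regions returned by the helpers above.  Note that the
	 * driver's to_addr_offs() always takes its offsets from object 0,
	 * whereas this sketch shows the general per-object layout.
	 */
	static void scribble_regions(struct stripe_head *sh,
				     struct raid5_percpu *percpu, int i,
				     struct page ***pages, addr_conv_t **conv,
				     unsigned int **offs)
	{
		struct page **obj = percpu->scribble + i * percpu->scribble_obj_size;

		*pages = obj;					  /* disks + 2 page pointers */
		*conv = (addr_conv_t *)(obj + sh->disks + 2);	  /* async_tx scratch        */
		*offs = (unsigned int *)(*conv + sh->disks + 2); /* per-page byte offsets   */
	}
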
1559 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute5() argument
1562 struct page **xor_srcs = to_addr_page(percpu, 0); in ops_run_compute5()
1563 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_compute5()
1589 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); in ops_run_compute5()
1656 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_1() argument
1659 struct page **blocks = to_addr_page(percpu, 0); in ops_run_compute6_1()
1660 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_compute6_1()
1696 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1711 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1720 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_2() argument
1731 struct page **blocks = to_addr_page(percpu, 0); in ops_run_compute6_2()
1732 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_compute6_2()
1778 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1806 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1814 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1822 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
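
Every ops_run_compute* hit above (and the prexor hits that follow) uses the scribble area the same way: fetch the per-CPU page-pointer and offset arrays, fill them from the stripe's devices, and pass the addr_conv region to async_tx as its scratch space. A condensed, hedged sketch of that pattern; the real functions also handle the single-source memcpy case, RAID6 syndrome selection, and batching, all omitted here:

	/*
	 * Condensed sketch of the scribble usage shared by the
	 * ops_run_compute* paths (error handling and block selection elided).
	 */
	static struct dma_async_tx_descriptor *
	compute_sketch(struct stripe_head *sh, struct raid5_percpu *percpu)
	{
		struct page **xor_srcs = to_addr_page(percpu, 0);
		unsigned int *off_srcs = to_addr_offs(sh, percpu);
		struct async_submit_ctl submit;
		int target = sh->ops.target;
		struct r5dev *tgt = &sh->dev[target];
		int count = 0, i;

		/* gather every other device's page as an XOR source */
		for (i = sh->disks; i--; ) {
			if (i == target)
				continue;
			off_srcs[count] = sh->dev[i].offset;
			xor_srcs[count++] = sh->dev[i].page;
		}

		/* the addr_conv region doubles as async_tx scratch space */
		init_async_submit(&submit, ASYNC_TX_FENCE | ASYNC_TX_XOR_ZERO_DST,
				  NULL, ops_complete_compute, sh,
				  to_addr_conv(sh, percpu, 0));
		return async_xor_offs(tgt->page, tgt->offset, xor_srcs, off_srcs,
				      count, RAID5_STRIPE_SIZE(sh->raid_conf),
				      &submit);
	}
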
1855 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor5() argument
1859 struct page **xor_srcs = to_addr_page(percpu, 0); in ops_run_prexor5()
1860 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_prexor5()
1889 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor5()
1897 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor6() argument
1900 struct page **blocks = to_addr_page(percpu, 0); in ops_run_prexor6()
1901 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_prexor6()
1911 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor6()
2038 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct5() argument
2071 xor_srcs = to_addr_page(percpu, j); in ops_run_reconstruct5()
2072 off_srcs = to_addr_offs(sh, percpu); in ops_run_reconstruct5()
2114 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
2118 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
2136 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct6() argument
2165 blocks = to_addr_page(percpu, j); in ops_run_reconstruct6()
2166 offs = to_addr_offs(sh, percpu); in ops_run_reconstruct6()
2184 head_sh, to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
2187 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
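
Unlike the compute and prexor paths, the reconstruct paths index the scribble area with a running j rather than 0: when stripe heads are batched, each batch member gets its own page-pointer/addr_conv object so the chained descriptors do not reuse each other's scratch space. A hedged fragment showing only that indexing (local declarations, source selection and completion-callback handling elided; the real code attaches ops_complete_reconstruct only to the last batch member):

	struct stripe_head *head_sh = sh;
	int j = 0;

	again:
		xor_srcs = to_addr_page(percpu, j);	/* one scribble object per batch member */
		off_srcs = to_addr_offs(sh, percpu);
		/* ... gather this stripe head's source pages and offsets ... */
		init_async_submit(&submit, flags, tx, NULL, NULL,
				  to_addr_conv(sh, percpu, j));
		tx = async_xor_offs(xor_dest, off_dest, xor_srcs, off_srcs, count,
				    RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
		if (!last_stripe) {
			j++;
			sh = list_first_entry(&sh->batch_list, struct stripe_head,
					      batch_list);
			goto again;
		}
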
2210 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_check_p() argument
2217 struct page **xor_srcs = to_addr_page(percpu, 0); in ops_run_check_p()
2218 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_check_p()
2241 to_addr_conv(sh, percpu, 0)); in ops_run_check_p()
2251 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) in ops_run_check_pq() argument
2253 struct page **srcs = to_addr_page(percpu, 0); in ops_run_check_pq()
2254 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_check_pq()
2268 sh, to_addr_conv(sh, percpu, 0)); in ops_run_check_pq()
2271 &sh->ops.zero_sum_result, percpu->spare_page, 0, &submit); in ops_run_check_pq()
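
The parity-check paths are the one consumer of the other per-CPU resource, percpu->spare_page: ops_run_check_pq() hands it to the async syndrome-validation helper as a throwaway destination so the stripe's real P/Q pages stay untouched. A hedged fragment following the arguments visible above (the srcs/offs/count setup and the checkp handling are elided):

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
			  sh, to_addr_conv(sh, percpu, 0));
	async_syndrome_val(srcs, offs, 0, count + 2,
			   RAID5_STRIPE_SIZE(sh->raid_conf),
			   &sh->ops.zero_sum_result,
			   percpu->spare_page, 0, &submit);
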
2280 struct raid5_percpu *percpu; in raid_run_ops() local
2282 local_lock(&conf->percpu->lock); in raid_run_ops()
2283 percpu = this_cpu_ptr(conf->percpu); in raid_run_ops()
2291 tx = ops_run_compute5(sh, percpu); in raid_run_ops()
2294 tx = ops_run_compute6_1(sh, percpu); in raid_run_ops()
2296 tx = ops_run_compute6_2(sh, percpu); in raid_run_ops()
2305 tx = ops_run_prexor5(sh, percpu, tx); in raid_run_ops()
2307 tx = ops_run_prexor6(sh, percpu, tx); in raid_run_ops()
2311 tx = ops_run_partial_parity(sh, percpu, tx); in raid_run_ops()
2320 ops_run_reconstruct5(sh, percpu, tx); in raid_run_ops()
2322 ops_run_reconstruct6(sh, percpu, tx); in raid_run_ops()
2327 ops_run_check_p(sh, percpu); in raid_run_ops()
2329 ops_run_check_pq(sh, percpu, 0); in raid_run_ops()
2331 ops_run_check_pq(sh, percpu, 1); in raid_run_ops()
2343 local_unlock(&conf->percpu->lock); in raid_run_ops()
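
raid_run_ops() is the dispatcher for all of the ops_run_* helpers above, and its hits reduce to the locking pattern that makes one scribble/spare_page per CPU sufficient: the whole chain of operations runs under the per-CPU local_lock, so no two stripe operations can use the same CPU's scratch area concurrently. A condensed sketch (the request-type dispatch in between is elided):

	struct raid5_percpu *percpu;

	local_lock(&conf->percpu->lock);
	percpu = this_cpu_ptr(conf->percpu);
	/* ... dispatch to the ops_run_* helpers listed above ... */
	local_unlock(&conf->percpu->lock);
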
2457 static int scribble_alloc(struct raid5_percpu *percpu, in scribble_alloc() argument
2475 kvfree(percpu->scribble); in scribble_alloc()
2477 percpu->scribble = scribble; in scribble_alloc()
2478 percpu->scribble_obj_size = obj_size; in scribble_alloc()
2496 struct raid5_percpu *percpu; in resize_chunks() local
2498 percpu = per_cpu_ptr(conf->percpu, cpu); in resize_chunks()
2499 err = scribble_alloc(percpu, new_disks, in resize_chunks()
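
scribble_alloc() is what sizes those per-CPU objects: cnt objects (one per stripe operation that may be in flight at once), each large enough for the three disks + 2 arrays that the to_addr_* helpers index; resize_chunks() above re-runs it for every CPU when the array is reshaped. A hedged reconstruction from the visible lines (the allocation flags and size arithmetic are assumptions not shown in the listing):

	/*
	 * Sketch of scribble_alloc(): num is the disk count the array may
	 * grow to, cnt is how many stripe operations can use the buffer
	 * concurrently.  addr_conv_t comes from <linux/async_tx.h>.
	 */
	static int scribble_alloc(struct raid5_percpu *percpu, int num, int cnt)
	{
		size_t obj_size =
			sizeof(struct page *) * (num + 2) +
			sizeof(addr_conv_t) * (num + 2) +
			sizeof(unsigned int) * (num + 2);
		void *scribble;

		scribble = kvmalloc_array(cnt, obj_size, GFP_KERNEL);
		if (!scribble)
			return -ENOMEM;

		kvfree(percpu->scribble);	/* replace any previous buffer */
		percpu->scribble = scribble;
		percpu->scribble_obj_size = obj_size;
		return 0;
	}
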
7297 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in free_scratch_buffer() argument
7299 safe_put_page(percpu->spare_page); in free_scratch_buffer()
7300 percpu->spare_page = NULL; in free_scratch_buffer()
7301 kvfree(percpu->scribble); in free_scratch_buffer()
7302 percpu->scribble = NULL; in free_scratch_buffer()
7305 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) in alloc_scratch_buffer() argument
7307 if (conf->level == 6 && !percpu->spare_page) { in alloc_scratch_buffer()
7308 percpu->spare_page = alloc_page(GFP_KERNEL); in alloc_scratch_buffer()
7309 if (!percpu->spare_page) in alloc_scratch_buffer()
7313 if (scribble_alloc(percpu, in alloc_scratch_buffer()
7319 free_scratch_buffer(conf, percpu); in alloc_scratch_buffer()
7323 local_lock_init(&percpu->lock); in alloc_scratch_buffer()
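
alloc_scratch_buffer() ties the two resources together: the spare page is only allocated for RAID6 (level 6), the scribble area is sized for the larger of the current and previous geometries, and the per-CPU local_lock is initialised here; free_scratch_buffer() above undoes both allocations. A hedged reconstruction from the lines above (the exact scribble_alloc() size expressions are assumptions):

	static int alloc_scratch_buffer(struct r5conf *conf,
					struct raid5_percpu *percpu)
	{
		if (conf->level == 6 && !percpu->spare_page) {
			percpu->spare_page = alloc_page(GFP_KERNEL);
			if (!percpu->spare_page)
				return -ENOMEM;
		}

		if (scribble_alloc(percpu,
				   max(conf->raid_disks, conf->previous_raid_disks),
				   max(conf->chunk_sectors, conf->prev_chunk_sectors) /
				   RAID5_STRIPE_SECTORS(conf))) {
			free_scratch_buffer(conf, percpu);
			return -ENOMEM;
		}

		local_lock_init(&percpu->lock);
		return 0;
	}
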
7331 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid456_cpu_dead()
7337 if (!conf->percpu) in raid5_free_percpu()
7341 free_percpu(conf->percpu); in raid5_free_percpu()
7367 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); in raid456_cpu_up_prepare() local
7369 if (alloc_scratch_buffer(conf, percpu)) { in raid456_cpu_up_prepare()
7381 conf->percpu = alloc_percpu(struct raid5_percpu); in raid5_alloc_percpu()
7382 if (!conf->percpu) in raid5_alloc_percpu()
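
Finally, the per-CPU container itself is created once per array in raid5_alloc_percpu() and torn down in raid5_free_percpu(); the raid456_cpu_up_prepare() and raid456_cpu_dead() callbacks above keep each CPU's scratch buffers in step as CPUs come and go. A hedged sketch of the allocation path, assuming the cpuhp state (CPUHP_MD_RAID5_PREPARE) and the conf->node hotplug instance hook used by the driver; the scribble sizing bookkeeping recorded on success is omitted:

	static int raid5_alloc_percpu(struct r5conf *conf)
	{
		conf->percpu = alloc_percpu(struct raid5_percpu);
		if (!conf->percpu)
			return -ENOMEM;

		/*
		 * Runs raid456_cpu_up_prepare() on every present CPU now and
		 * again whenever a CPU is hotplugged in; raid456_cpu_dead()
		 * frees that CPU's scratch buffers on the way out.
		 */
		return cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
	}
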