Lines Matching refs: buf_desc

53 			 struct smc_buf_desc *buf_desc);
1098 static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb, in smcr_buf_unuse() argument
1104 if (is_rmb && buf_desc->is_conf_rkey && !list_empty(&lgr->list)) { in smcr_buf_unuse()
1110 smc_llc_do_delete_rkey(lgr, buf_desc); in smcr_buf_unuse()
1111 buf_desc->is_conf_rkey = false; in smcr_buf_unuse()
1117 if (buf_desc->is_reg_err) { in smcr_buf_unuse()
1122 list_del(&buf_desc->list); in smcr_buf_unuse()
1125 smc_buf_free(lgr, is_rmb, buf_desc); in smcr_buf_unuse()
1128 memzero_explicit(buf_desc->cpu_addr, buf_desc->len); in smcr_buf_unuse()
1129 WRITE_ONCE(buf_desc->used, 0); in smcr_buf_unuse()
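
The smcr_buf_unuse() hits above (1098-1129) outline how an SMC-R buffer descriptor is released. A condensed sketch of that path, assuming the net/smc internal headers (smc_core.h and friends); the third parameter, the if/else structure, and the locking around list_del() are inferred from the hits rather than quoted:

static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
			   struct smc_link_group *lgr)
{
	if (is_rmb && buf_desc->is_conf_rkey && !list_empty(&lgr->list)) {
		/* the peer still holds an rkey for this RMB: revoke it
		 * via an LLC DELETE_RKEY exchange (flow setup omitted)
		 */
		smc_llc_do_delete_rkey(lgr, buf_desc);
		buf_desc->is_conf_rkey = false;
	}

	if (buf_desc->is_reg_err) {
		/* memory-region registration failed earlier, so the
		 * buffer cannot be reused: unlink and free it
		 */
		list_del(&buf_desc->list);	/* under the buffer-list lock */
		smc_buf_free(lgr, is_rmb, buf_desc);
	} else {				/* assumed else branch */
		/* keep the descriptor cached for reuse */
		memzero_explicit(buf_desc->cpu_addr, buf_desc->len);
		WRITE_ONCE(buf_desc->used, 0);
	}
}

So the descriptor is only freed when a registration had failed; otherwise it is wiped and left on its size-indexed list, where a later connection can pick it up again via smc_buf_get_slot() (see hit 2335 below).
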
1196 static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb, in smcr_buf_unmap_link() argument
1199 if (is_rmb || buf_desc->is_vm) in smcr_buf_unmap_link()
1200 buf_desc->is_reg_mr[lnk->link_idx] = false; in smcr_buf_unmap_link()
1201 if (!buf_desc->is_map_ib[lnk->link_idx]) in smcr_buf_unmap_link()
1204 if ((is_rmb || buf_desc->is_vm) && in smcr_buf_unmap_link()
1205 buf_desc->mr[lnk->link_idx]) { in smcr_buf_unmap_link()
1206 smc_ib_put_memory_region(buf_desc->mr[lnk->link_idx]); in smcr_buf_unmap_link()
1207 buf_desc->mr[lnk->link_idx] = NULL; in smcr_buf_unmap_link()
1210 smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE); in smcr_buf_unmap_link()
1212 smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE); in smcr_buf_unmap_link()
1214 sg_free_table(&buf_desc->sgt[lnk->link_idx]); in smcr_buf_unmap_link()
1215 buf_desc->is_map_ib[lnk->link_idx] = false; in smcr_buf_unmap_link()
1222 struct smc_buf_desc *buf_desc, *bf; in smcr_buf_unmap_lgr() local
1227 list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) in smcr_buf_unmap_lgr()
1228 smcr_buf_unmap_link(buf_desc, true, lnk); in smcr_buf_unmap_lgr()
1232 list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], in smcr_buf_unmap_lgr()
1234 smcr_buf_unmap_link(buf_desc, false, lnk); in smcr_buf_unmap_lgr()
1296 struct smc_buf_desc *buf_desc) in smcr_buf_free() argument
1301 smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]); in smcr_buf_free()
1303 if (!buf_desc->is_vm && buf_desc->pages) in smcr_buf_free()
1304 __free_pages(buf_desc->pages, buf_desc->order); in smcr_buf_free()
1305 else if (buf_desc->is_vm && buf_desc->cpu_addr) in smcr_buf_free()
1306 vfree(buf_desc->cpu_addr); in smcr_buf_free()
1307 kfree(buf_desc); in smcr_buf_free()
1311 struct smc_buf_desc *buf_desc) in smcd_buf_free() argument
1315 buf_desc->len += sizeof(struct smcd_cdc_msg); in smcd_buf_free()
1316 smc_ism_unregister_dmb(lgr->smcd, buf_desc); in smcd_buf_free()
1318 kfree(buf_desc->cpu_addr); in smcd_buf_free()
1320 kfree(buf_desc); in smcd_buf_free()
1324 struct smc_buf_desc *buf_desc) in smc_buf_free() argument
1327 smcd_buf_free(lgr, is_rmb, buf_desc); in smc_buf_free()
1329 smcr_buf_free(lgr, is_rmb, buf_desc); in smc_buf_free()
1334 struct smc_buf_desc *buf_desc, *bf_desc; in __smc_lgr_free_bufs() local
1343 list_for_each_entry_safe(buf_desc, bf_desc, buf_list, in __smc_lgr_free_bufs()
1345 list_del(&buf_desc->list); in __smc_lgr_free_bufs()
1346 smc_buf_free(lgr, is_rmb, buf_desc); in __smc_lgr_free_bufs()
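
Hits 1296-1346 cover the free path: smcr_buf_free() unmaps the buffer from every link and frees either the page allocation or the vmalloc'ed area, smcd_buf_free() restores the real DMB length and unregisters it (or frees a plain send buffer), and smc_buf_free() plus __smc_lgr_free_bufs() sit on top. A sketch of those two outer helpers, with the is_smcd test, the SMC_RMBE_SIZES bound, and the omitted lock handling taken from surrounding code rather than from the hits:

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc)
{
	if (lgr->is_smcd)		/* SMC-D: ISM-backed DMB */
		smcd_buf_free(lgr, is_rmb, buf_desc);
	else				/* SMC-R: pages/vmalloc + IB mappings */
		smcr_buf_free(lgr, is_rmb, buf_desc);
}

static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {	/* one list per buffer size */
		buf_list = is_rmb ? &lgr->rmbs[i] : &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list, list) {
			list_del(&buf_desc->list);
			smc_buf_free(lgr, is_rmb, buf_desc);
		}
	}
}
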
2019 static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb, in smcr_buf_map_link() argument
2026 if (buf_desc->is_map_ib[lnk->link_idx]) in smcr_buf_map_link()
2029 if (buf_desc->is_vm) { in smcr_buf_map_link()
2030 buf = buf_desc->cpu_addr; in smcr_buf_map_link()
2031 buf_size = buf_desc->len; in smcr_buf_map_link()
2032 offset = offset_in_page(buf_desc->cpu_addr); in smcr_buf_map_link()
2038 rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], nents, GFP_KERNEL); in smcr_buf_map_link()
2042 if (buf_desc->is_vm) { in smcr_buf_map_link()
2044 for_each_sg(buf_desc->sgt[lnk->link_idx].sgl, sg, nents, i) { in smcr_buf_map_link()
2053 sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl, in smcr_buf_map_link()
2054 buf_desc->cpu_addr, buf_desc->len); in smcr_buf_map_link()
2058 rc = smc_ib_buf_map_sg(lnk, buf_desc, in smcr_buf_map_link()
2066 buf_desc->is_dma_need_sync |= in smcr_buf_map_link()
2067 smc_ib_is_sg_need_sync(lnk, buf_desc) << lnk->link_idx; in smcr_buf_map_link()
2069 if (is_rmb || buf_desc->is_vm) { in smcr_buf_map_link()
2076 buf_desc, lnk->link_idx); in smcr_buf_map_link()
2079 smc_ib_sync_sg_for_device(lnk, buf_desc, in smcr_buf_map_link()
2082 buf_desc->is_map_ib[lnk->link_idx] = true; in smcr_buf_map_link()
2086 smc_ib_buf_unmap_sg(lnk, buf_desc, in smcr_buf_map_link()
2089 sg_free_table(&buf_desc->sgt[lnk->link_idx]); in smcr_buf_map_link()
2096 int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *buf_desc) in smcr_link_reg_buf() argument
2100 if (!buf_desc->is_reg_mr[link->link_idx]) { in smcr_link_reg_buf()
2102 if (buf_desc->is_vm) in smcr_link_reg_buf()
2103 buf_desc->mr[link->link_idx]->iova = in smcr_link_reg_buf()
2104 (uintptr_t)buf_desc->cpu_addr; in smcr_link_reg_buf()
2105 if (smc_wr_reg_send(link, buf_desc->mr[link->link_idx])) { in smcr_link_reg_buf()
2106 buf_desc->is_reg_err = true; in smcr_link_reg_buf()
2109 buf_desc->is_reg_mr[link->link_idx] = true; in smcr_link_reg_buf()
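
Hits 2096-2109 show smcr_link_reg_buf(), which takes a buffer that smcr_buf_map_link() has already DMA-mapped and registers its memory region on one link. A sketch assuming the net/smc headers; the return codes and any early sanity checks are not in the hits and are assumed:

int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *buf_desc)
{
	if (!buf_desc->is_reg_mr[link->link_idx]) {
		/* virtually contiguous buffers tell the HCA which IOVA
		 * the memory region starts at
		 */
		if (buf_desc->is_vm)
			buf_desc->mr[link->link_idx]->iova =
				(uintptr_t)buf_desc->cpu_addr;
		/* issue the memory-registration work request on this link */
		if (smc_wr_reg_send(link, buf_desc->mr[link->link_idx])) {
			buf_desc->is_reg_err = true;	/* see smcr_buf_unuse() */
			return -EFAULT;			/* assumed errno */
		}
		buf_desc->is_reg_mr[link->link_idx] = true;
	}
	return 0;
}
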
2117 struct smc_buf_desc *buf_desc, *bf; in _smcr_buf_map_lgr() local
2121 list_for_each_entry_safe(buf_desc, bf, lst, list) { in _smcr_buf_map_lgr()
2122 if (!buf_desc->used) in _smcr_buf_map_lgr()
2124 rc = smcr_buf_map_link(buf_desc, is_rmb, lnk); in _smcr_buf_map_lgr()
2158 struct smc_buf_desc *buf_desc, *bf; in smcr_buf_reg_lgr() local
2164 list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) { in smcr_buf_reg_lgr()
2165 if (!buf_desc->used) in smcr_buf_reg_lgr()
2167 rc = smcr_link_reg_buf(lnk, buf_desc); in smcr_buf_reg_lgr()
2182 list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], list) { in smcr_buf_reg_lgr()
2183 if (!buf_desc->used || !buf_desc->is_vm) in smcr_buf_reg_lgr()
2185 rc = smcr_link_reg_buf(lnk, buf_desc); in smcr_buf_reg_lgr()
2199 struct smc_buf_desc *buf_desc; in smcr_new_buf_create() local
2202 buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL); in smcr_new_buf_create()
2203 if (!buf_desc) in smcr_new_buf_create()
2209 buf_desc->order = get_order(bufsize); in smcr_new_buf_create()
2210 buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN | in smcr_new_buf_create()
2213 buf_desc->order); in smcr_new_buf_create()
2214 if (buf_desc->pages) { in smcr_new_buf_create()
2215 buf_desc->cpu_addr = in smcr_new_buf_create()
2216 (void *)page_address(buf_desc->pages); in smcr_new_buf_create()
2217 buf_desc->len = bufsize; in smcr_new_buf_create()
2218 buf_desc->is_vm = false; in smcr_new_buf_create()
2225 buf_desc->order = get_order(bufsize); in smcr_new_buf_create()
2226 buf_desc->cpu_addr = vzalloc(PAGE_SIZE << buf_desc->order); in smcr_new_buf_create()
2227 if (!buf_desc->cpu_addr) in smcr_new_buf_create()
2229 buf_desc->pages = NULL; in smcr_new_buf_create()
2230 buf_desc->len = bufsize; in smcr_new_buf_create()
2231 buf_desc->is_vm = true; in smcr_new_buf_create()
2234 return buf_desc; in smcr_new_buf_create()
2237 kfree(buf_desc); in smcr_new_buf_create()
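
Hits 2199-2237 show smcr_new_buf_create() preferring a physically contiguous allocation and falling back to a virtually contiguous buffer flagged is_vm. A sketch of that fallback; the GFP flags beyond GFP_KERNEL | __GFP_NOWARN, the -EAGAIN return value, and the omission of the per-link-group buffer-type policy are assumptions:

static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
						bool is_rmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;

	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);

	buf_desc->order = get_order(bufsize);
	/* opportunistic: don't warn or retry hard if high-order pages
	 * are not available right now
	 */
	buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
				      __GFP_NOMEMALLOC | __GFP_NORETRY |
				      __GFP_ZERO, buf_desc->order);
	if (buf_desc->pages) {
		buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);
		buf_desc->len = bufsize;
		buf_desc->is_vm = false;
		return buf_desc;
	}

	/* fall back to a virtually contiguous buffer */
	buf_desc->cpu_addr = vzalloc(PAGE_SIZE << buf_desc->order);
	if (!buf_desc->cpu_addr) {
		kfree(buf_desc);
		return ERR_PTR(-EAGAIN);	/* assumed: retry a smaller size */
	}
	buf_desc->pages = NULL;
	buf_desc->len = bufsize;
	buf_desc->is_vm = true;
	return buf_desc;
}
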
2245 struct smc_buf_desc *buf_desc, bool is_rmb) in smcr_buf_map_usable_links() argument
2256 if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) { in smcr_buf_map_usable_links()
2272 struct smc_buf_desc *buf_desc; in smcd_new_buf_create() local
2276 buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL); in smcd_new_buf_create()
2277 if (!buf_desc) in smcd_new_buf_create()
2280 rc = smc_ism_register_dmb(lgr, bufsize, buf_desc); in smcd_new_buf_create()
2282 kfree(buf_desc); in smcd_new_buf_create()
2289 buf_desc->pages = virt_to_page(buf_desc->cpu_addr); in smcd_new_buf_create()
2291 buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg); in smcd_new_buf_create()
2293 buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL | in smcd_new_buf_create()
2296 if (!buf_desc->cpu_addr) { in smcd_new_buf_create()
2297 kfree(buf_desc); in smcd_new_buf_create()
2300 buf_desc->len = bufsize; in smcd_new_buf_create()
2302 return buf_desc; in smcd_new_buf_create()
2307 struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM); in __smc_buf_create() local
2335 buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list); in __smc_buf_create()
2336 if (buf_desc) { in __smc_buf_create()
2337 buf_desc->is_dma_need_sync = 0; in __smc_buf_create()
2344 buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize); in __smc_buf_create()
2346 buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize); in __smc_buf_create()
2348 if (PTR_ERR(buf_desc) == -ENOMEM) in __smc_buf_create()
2350 if (IS_ERR(buf_desc)) { in __smc_buf_create()
2360 buf_desc->used = 1; in __smc_buf_create()
2362 list_add(&buf_desc->list, buf_list); in __smc_buf_create()
2367 if (IS_ERR(buf_desc)) in __smc_buf_create()
2368 return PTR_ERR(buf_desc); in __smc_buf_create()
2371 if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) { in __smc_buf_create()
2372 smcr_buf_unuse(buf_desc, is_rmb, lgr); in __smc_buf_create()
2378 conn->rmb_desc = buf_desc; in __smc_buf_create()
2383 smc_rmb_wnd_update_limit(buf_desc->len); in __smc_buf_create()
2387 conn->sndbuf_desc = buf_desc; in __smc_buf_create()
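
The __smc_buf_create() hits end (2367-2387) with the descriptor being mapped on all usable links and attached to the connection. A hypothetical helper, not present in the kernel, distilling that tail; the !is_smcd guard, the -ENOMEM code, and the exact rmb/sndbuf bookkeeping are assumptions:

static int smc_buf_attach(struct smc_connection *conn,
			  struct smc_link_group *lgr,
			  struct smc_buf_desc *buf_desc,
			  bool is_smcd, bool is_rmb)
{
	if (IS_ERR(buf_desc))
		return PTR_ERR(buf_desc);	/* creation failed for all sizes */

	/* SMC-R: map the buffer on every usable link of the group;
	 * on failure give it back through smcr_buf_unuse()
	 */
	if (!is_smcd && smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
		smcr_buf_unuse(buf_desc, is_rmb, lgr);
		return -ENOMEM;			/* assumed errno */
	}

	if (is_rmb) {
		conn->rmb_desc = buf_desc;	/* receive buffer (RMB/DMB) */
		smc_rmb_wnd_update_limit(buf_desc->len);
	} else {
		conn->sndbuf_desc = buf_desc;	/* send buffer */
	}
	return 0;
}
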