| /linux/drivers/infiniband/sw/rxe/ |
| A D | rxe_mw.c |
     32  mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?  in rxe_alloc_mw()
    124  ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >  in rxe_check_bind_mw()
    140  mw->rkey = (mw->rkey & ~0xff) | key;  in rxe_do_bind_mw()
    143  mw->addr = wqe->wr.wr.mw.addr;  in rxe_do_bind_mw()
    144  mw->length = wqe->wr.wr.mw.length;  in rxe_do_bind_mw()
    219  rxe_put(mw);  in rxe_bind_mw()
    242  qp = mw->qp;  in rxe_do_invalidate_mw()
    247  mr = mw->mr;  in rxe_do_invalidate_mw()
    265  if (!mw) {  in rxe_invalidate_mw()
    298  if (!mw)  in rxe_lookup_mw()
    [all …]
|
| A D | rxe.h |
     58  #define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \  argument
     59  "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
     79  #define rxe_err_mw(mw, fmt, ...) ibdev_err_ratelimited((mw)->ibmw.device, \  argument
     80  "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
    100  #define rxe_info_mw(mw, fmt, ...) ibdev_info_ratelimited((mw)->ibmw.device, \  argument
    101  "mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
|
| A D | rxe_verbs.h |
    454  static inline struct rxe_mw *to_rmw(struct ib_mw *mw)  in to_rmw() argument
    456  return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;  in to_rmw()
    469  static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)  in rxe_mw_pd() argument
    471  return to_rpd(mw->ibmw.pd);  in rxe_mw_pd()
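The to_rmw() helper above is the standard container_of() downcast from the generic ib_mw handle to the rxe_mw structure that embeds it. A minimal, self-contained sketch of that pattern, using hypothetical struct names rather than the rxe definitions:

```c
#include <stddef.h>

/* Hypothetical stand-ins for ib_mw / rxe_mw. */
struct generic_mw { unsigned int rkey; };

struct driver_mw {
	int state;
	struct generic_mw ibmw;		/* embedded generic handle */
};

/* Recover the enclosing struct from a pointer to its embedded member
 * (essentially the kernel's container_of(), minus its type checking).
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static inline struct driver_mw *to_driver_mw(struct generic_mw *mw)
{
	return mw ? container_of(mw, struct driver_mw, ibmw) : NULL;
}
```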
|
| A D | rxe_resp.c |
    487  if (!mw) {  in check_rkey()
    493  mr = mw->mr;  in check_rkey()
    504  rxe_put(mw);  in check_rkey()
    505  mw = NULL;  in check_rkey()
    559  if (mw)  in check_rkey()
    560  rxe_put(mw);  in check_rkey()
    837  if (!mw)  in rxe_recheck_mr()
    840  mr = mw->mr;  in rxe_recheck_mr()
    841  if (mw->rkey != rkey || mw->state != RXE_MW_STATE_VALID ||  in rxe_recheck_mr()
    843  rxe_put(mw);  in rxe_recheck_mr()
    [all …]
|
| /linux/net/netfilter/ipvs/ |
| A D | ip_vs_wrr.c |
     63  int mw; /* maximum weight */  member
    119  mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1);  in ip_vs_wrr_init_svc()
    120  mark->cw = mark->mw;  in ip_vs_wrr_init_svc()
    146  mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1);  in ip_vs_wrr_dest_changed()
    147  if (mark->cw > mark->mw || !mark->cw)  in ip_vs_wrr_dest_changed()
    148  mark->cw = mark->mw;  in ip_vs_wrr_dest_changed()
    172  if (mark->mw == 0)  in ip_vs_wrr_schedule()
    188  mark->cw = mark->mw;  in ip_vs_wrr_schedule()
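The mw, cw, and di fields above drive the classic LVS weighted round-robin scan: mw is derived from the maximum destination weight, di is the GCD of all weights, and cw is the current-weight threshold that drops by di on every full pass over the destination list. A minimal sketch of that selection loop under those assumptions (hypothetical server array, not the ip_vs_wrr.c code itself):

```c
#include <stddef.h>

/* Classic weighted round-robin selection over a fixed server list. */
struct server { const char *name; int weight; };

struct wrr_state {
	int i;		/* last scanned index; start at -1 */
	int cw;		/* current weight threshold; start at 0 */
	int mw;		/* maximum weight */
	int di;		/* GCD of all weights */
};

static struct server *wrr_next(struct server *srv, int n, struct wrr_state *st)
{
	if (st->mw == 0)	/* every weight is zero: nothing schedulable */
		return NULL;

	for (;;) {
		st->i = (st->i + 1) % n;
		if (st->i == 0) {
			/* Completed a pass: lower the threshold by the GCD,
			 * wrapping back to the maximum weight.
			 */
			st->cw -= st->di;
			if (st->cw <= 0)
				st->cw = st->mw;
		}
		if (srv[st->i].weight >= st->cw)
			return &srv[st->i];
	}
}
```

Servers with higher weights satisfy the threshold on more passes, so they are picked proportionally more often while the scan order stays round-robin.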
|
| A D | ip_vs_mh.c |
    347  int mw, shift;  in ip_vs_mh_shift_weight() local
    365  mw = weight / gcd;  in ip_vs_mh_shift_weight()
    368  shift = fls(mw) - IP_VS_MH_TAB_BITS;  in ip_vs_mh_shift_weight()
|
| /linux/drivers/ntb/ |
| A D | ntb_transport.c |
    633  if (!mw->virt_addr)  in ntb_transport_setup_qp_mw()
    798  if (!mw->virt_addr)  in ntb_free_mw()
    803  mw->alloc_addr, mw->dma_addr);  in ntb_free_mw()
    804  mw->xlat_size = 0;  in ntb_free_mw()
    805  mw->buff_size = 0;  in ntb_free_mw()
    835  mw->alloc_size);  in ntb_alloc_mw_buffer()
    847  if (mw->alloc_size > mw->buff_size) {  in ntb_alloc_mw_buffer()
    893  if (mw->buff_size)  in ntb_set_mw()
   1339  mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);  in ntb_transport_probe()
   1340  if (!mw->vbase) {  in ntb_transport_probe()
    [all …]
|
| /linux/fs/ocfs2/ |
| A D | dlmglue.c |
    474  struct ocfs2_mask_waiter *mw;  in ocfs2_track_lock_wait() local
    898  if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)  in lockres_set_flags()
    902  mw->mw_status = 0;  in lockres_set_flags()
    903  complete(&mw->mw_complete);  in lockres_set_flags()
   1393  ocfs2_init_start_time(mw);  in ocfs2_init_mask_waiter()
   1401  return mw->mw_status;  in ocfs2_wait_for_mask()
   1414  mw->mw_mask = mask;  in lockres_add_mask_waiter()
   1415  mw->mw_goal = goal;  in lockres_add_mask_waiter()
   1428  if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)  in __lockres_remove_mask_waiter()
   1462  ret = mw->mw_status;  in ocfs2_wait_for_mask_interruptible()
    [all …]
|
| /linux/arch/mips/txx9/generic/ |
| A D | mem_tx4927.c |
     46  unsigned int mw = 0;  in tx4927_process_sdccr() local
     61  mw = 8 >> sdccr_mw;  in tx4927_process_sdccr()
     64  return rs * cs * mw * bs;  in tx4927_process_sdccr()
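tx4927_process_sdccr() above returns an SDRAM size as the product rs * cs * mw * bs, with mw = 8 >> sdccr_mw acting as a bus width in bytes. A hedged worked example of that product; the meanings assigned to rs, cs, and bs here (rows, columns, banks) are assumptions for illustration, not taken from the TX4927 documentation:

```c
/* Illustrative decode only: the rs/cs/bs meanings are assumptions. */
static unsigned int example_sdram_size(void)
{
	unsigned int rs = 8192;		/* assumed: 2^13 rows */
	unsigned int cs = 512;		/* assumed: 2^9 columns */
	unsigned int bs = 4;		/* assumed: banks */
	unsigned int mw = 8 >> 0;	/* sdccr_mw == 0 -> 8-byte (64-bit) bus */

	return rs * cs * mw * bs;	/* 134217728 bytes = 128 MiB */
}
```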
|
| /linux/drivers/net/ethernet/mellanox/mlx4/ |
| A D | mr.c |
    815  struct mlx4_mw *mw)  in mlx4_mw_alloc() argument
    830  mw->pd = pd;  in mlx4_mw_alloc()
    831  mw->type = type;  in mlx4_mw_alloc()
    832  mw->enabled = MLX4_MPT_DISABLED;  in mlx4_mw_alloc()
    860  if (mw->type == MLX4_MW_TYPE_2) {  in mlx4_mw_enable()
    867  key_to_hw_index(mw->key) &  in mlx4_mw_enable()
    873  mw->enabled = MLX4_MPT_EN_HW;  in mlx4_mw_enable()
    892  if (mw->enabled == MLX4_MPT_EN_HW) {  in mlx4_mw_free()
    894  key_to_hw_index(mw->key) &  in mlx4_mw_free()
    899  mw->enabled = MLX4_MPT_EN_SW;  in mlx4_mw_free()
    [all …]
|
| /linux/drivers/infiniband/hw/hns/ |
| A D | hns_roce_mr.c |
    484  struct hns_roce_mw *mw)  in hns_roce_mw_free() argument
    489  if (mw->enabled) {  in hns_roce_mw_free()
    497  key_to_hw_index(mw->rkey));  in hns_roce_mw_free()
    501  (int)key_to_hw_index(mw->rkey));  in hns_roce_mw_free()
    505  struct hns_roce_mw *mw)  in hns_roce_mw_enable() argument
    537  mw->enabled = 1;  in hns_roce_mw_enable()
    569  mw->rkey = hw_index_to_key(id);  in hns_roce_alloc_mw()
    571  ibmw->rkey = mw->rkey;  in hns_roce_alloc_mw()
    572  mw->pdn = to_hr_pd(ibmw->pd)->pdn;  in hns_roce_alloc_mw()
    584  hns_roce_mw_free(hr_dev, mw);  in hns_roce_alloc_mw()
    [all …]
|
| /linux/drivers/clk/rockchip/ |
| A D | clk.h |
    614  .mux_width = mw, \
    635  .mux_width = mw, \
    683  #define COMPOSITE_NODIV(_id, cname, pnames, f, mo, ms, mw, mf, \  argument
    694  .mux_width = mw, \
    712  .mux_width = mw, \
    721  mw, mf, ds, dw, df, dt) \  argument
    731  .mux_width = mw, \
    791  #define COMPOSITE_DDRCLK(_id, cname, pnames, f, mo, ms, mw, \  argument
    802  .mux_width = mw, \
    959  .mux_width = mw, \
    [all …]
|
| /linux/drivers/net/ethernet/marvell/octeontx2/af/ |
| A D | rvu.c |
   2210  mbox = &mw->mbox;  in __rvu_mbox_handler()
   2301  mbox = &mw->mbox_up;  in __rvu_mbox_up_handler()
   2485  if (!mw->mbox_wq) {  in rvu_mbox_init()
   2492  if (!mw->mbox_wrk) {  in rvu_mbox_init()
   2499  if (!mw->mbox_wrk_up) {  in rvu_mbox_init()
   2546  if (mw->mbox_wq) {  in rvu_mbox_destroy()
   2548  mw->mbox_wq = NULL;  in rvu_mbox_destroy()
   2574  mbox = &mw->mbox;  in rvu_queue_work()
   2589  queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);  in rvu_queue_work()
   2591  mbox = &mw->mbox_up;  in rvu_queue_work()
    [all …]
|
| /linux/include/linux/usb/ |
| A D | pd.h |
    250  #define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT)  argument
    375  #define RDO_BATT_OP_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_OP_PWR_SHIFT)  argument
    376  #define RDO_BATT_MAX_PWR(mw) ((((mw) / 250) & RDO_PWR_MASK) << RDO_BATT_MAX_PWR_SHIFT)  argument
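Both the PDO and RDO battery power fields above are encoded in 250 mW units, which is what the (mw) / 250 term expresses. A small self-contained sketch of the same encoding; the 10-bit mask and zero shift used here are assumptions for illustration, not values copied from pd.h:

```c
#include <stdint.h>

/* Assumed layout: a 10-bit power field in 250 mW units at bit offset 0. */
#define EXAMPLE_PWR_MASK	0x3ff
#define EXAMPLE_PWR_SHIFT	0

static inline uint32_t example_batt_max_power(unsigned int mw)
{
	return ((mw / 250) & EXAMPLE_PWR_MASK) << EXAMPLE_PWR_SHIFT;
}

/* e.g. a 45 W battery supply: 45000 mW / 250 = 180 units. */
```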
|
| /linux/drivers/infiniband/hw/mlx4/ |
| A D | mr.c |
    616  struct mlx4_ib_mw *mw = to_mmw(ibmw);  in mlx4_ib_alloc_mw() local
    620  to_mlx4_type(ibmw->type), &mw->mmw);  in mlx4_ib_alloc_mw()
    624  err = mlx4_mw_enable(dev->dev, &mw->mmw);  in mlx4_ib_alloc_mw()
    628  ibmw->rkey = mw->mmw.key;  in mlx4_ib_alloc_mw()
    632  mlx4_mw_free(dev->dev, &mw->mmw);  in mlx4_ib_alloc_mw()
    638  struct mlx4_ib_mw *mw = to_mmw(ibmw);  in mlx4_ib_dealloc_mw() local
    640  mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);  in mlx4_ib_dealloc_mw()
|
| /linux/drivers/mtd/maps/ |
| A D | physmap-core.c |
    147  map_word mw;  in physmap_addr_gpios_read() local
    155  mw.x[0] = word;  in physmap_addr_gpios_read()
    156  return mw;  in physmap_addr_gpios_read()
    181  static void physmap_addr_gpios_write(struct map_info *map, map_word mw,  in physmap_addr_gpios_write() argument
    192  word = mw.x[0];  in physmap_addr_gpios_write()
|
| /linux/drivers/pci/endpoint/functions/ |
| A D | pci-epf-ntb.c |
    236  enum pci_epc_interface_type type, u32 mw)  in epf_ntb_configure_mw() argument
    252  peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];  in epf_ntb_configure_mw()
    259  if (mw + NTB_MW_OFFSET == BAR_DB_MW1)  in epf_ntb_configure_mw()
    262  if (size > ntb->mws_size[mw]) {  in epf_ntb_configure_mw()
    265  pci_epc_interface_string(type), mw, size,  in epf_ntb_configure_mw()
    266  ntb->mws_size[mw]);  in epf_ntb_configure_mw()
    278  pci_epc_interface_string(type), mw);  in epf_ntb_configure_mw()
    295  enum pci_epc_interface_type type, u32 mw)  in epf_ntb_teardown_mw() argument
    309  peer_barno = peer_ntb_epc->epf_ntb_bar[mw + NTB_MW_OFFSET];  in epf_ntb_teardown_mw()
    314  if (mw + NTB_MW_OFFSET == BAR_DB_MW1)  in epf_ntb_teardown_mw()
|
| A D | pci-epf-vntb.c |
    202  static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw)  in epf_ntb_configure_mw() argument
    209  phys_addr = ntb->vpci_mw_phy[mw];  in epf_ntb_configure_mw()
    219  "Failed to map memory window %d address\n", mw);  in epf_ntb_configure_mw()
    231  static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw)  in epf_ntb_teardown_mw() argument
    236  ntb->vpci_mw_phy[mw]);  in epf_ntb_teardown_mw()
|
| /linux/include/dt-bindings/usb/ |
| A D | pd.h | 44 #define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT) argument
|
| /linux/drivers/infiniband/hw/bnxt_re/ |
| A D | ib_verbs.h |
     53  struct ib_mw *mw;  member
    254  int bnxt_re_dealloc_mw(struct ib_mw *mw);
|
| A D | ib_verbs.c |
    473  if (fence->mw) {  in bnxt_re_destroy_fence_mr()
    475  fence->mw = NULL;  in bnxt_re_destroy_fence_mr()
    501  struct ib_mw *mw;  in bnxt_re_create_fence_mr() local
    553  if (IS_ERR(mw)) {  in bnxt_re_create_fence_mr()
    556  rc = PTR_ERR(mw);  in bnxt_re_create_fence_mr()
    559  fence->mw = mw;  in bnxt_re_create_fence_mr()
   4048  mw = kzalloc(sizeof(*mw), GFP_KERNEL);  in bnxt_re_alloc_mw()
   4049  if (!mw)  in bnxt_re_alloc_mw()
   4062  mw->ib_mw.rkey = mw->qplib_mw.rkey;  in bnxt_re_alloc_mw()
   4070  kfree(mw);  in bnxt_re_alloc_mw()
    [all …]
|
| /linux/drivers/media/platform/nxp/dw100/ |
| A D | dw100.c |
    382  u32 sw, sh, mw, mh, idx;  in dw100_ctrl_dewarping_map_init() local
    389  mw = ctrl->dims[0];  in dw100_ctrl_dewarping_map_init()
    394  qdx = qsw / (mw - 1);  in dw100_ctrl_dewarping_map_init()
    397  ctx->map_width = mw;  in dw100_ctrl_dewarping_map_init()
    399  ctx->map_size = mh * mw * sizeof(u32);  in dw100_ctrl_dewarping_map_init()
    402  qy = min_t(u32, (idx / mw) * qdy, qsh);  in dw100_ctrl_dewarping_map_init()
    403  qx = min_t(u32, (idx % mw) * qdx, qsw);  in dw100_ctrl_dewarping_map_init()
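dw100_ctrl_dewarping_map_init() above fills an mw x mh grid in which each u32 entry receives a quantized source coordinate, stepping by qdx = qsw / (mw - 1) horizontally and qdy vertically and clamping to the quantized sensor extent. A rough sketch of that grid walk; how qx and qy are packed into the 32-bit entry is not visible in the matches above, so the 16/16 split below is an assumption:

```c
#include <stdint.h>

/* Fill a map_w x map_h identity-style dewarping grid of quantized
 * (qx, qy) source coordinates clamped to (qsw, qsh).
 */
static void example_fill_map(uint32_t *map, uint32_t map_w, uint32_t map_h,
			     uint32_t qsw, uint32_t qsh)
{
	uint32_t qdx = qsw / (map_w - 1);	/* horizontal step */
	uint32_t qdy = qsh / (map_h - 1);	/* vertical step */

	for (uint32_t idx = 0; idx < map_w * map_h; idx++) {
		uint32_t qx = (idx % map_w) * qdx;
		uint32_t qy = (idx / map_w) * qdy;

		if (qx > qsw)
			qx = qsw;
		if (qy > qsh)
			qy = qsh;

		/* Assumed packing: x in the low half-word, y in the high. */
		map[idx] = ((qy & 0xffff) << 16) | (qx & 0xffff);
	}
}
```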
|
| /linux/include/uapi/rdma/ |
| A D | rdma_user_rxe.h | 120 } mw; member
|
| /linux/drivers/infiniband/core/ |
| A D | uverbs_main.c |
    116  int uverbs_dealloc_mw(struct ib_mw *mw)  in uverbs_dealloc_mw() argument
    118  struct ib_pd *pd = mw->pd;  in uverbs_dealloc_mw()
    121  ret = mw->device->ops.dealloc_mw(mw);  in uverbs_dealloc_mw()
    126  kfree(mw);  in uverbs_dealloc_mw()
|
| /linux/drivers/net/ethernet/marvell/octeontx2/nic/ |
| A D | otx2_pf.c |
    308  mbox = &mw->mbox;  in otx2_queue_vf_work()
    319  mw[i].num_msgs = hdr->num_msgs;  in otx2_queue_vf_work()
    321  queue_work(mbox_wq, &mw[i].mbox_wrk);  in otx2_queue_vf_work()
    324  mbox = &mw->mbox_up;  in otx2_queue_vf_work()
    328  mw[i].up_num_msgs = hdr->num_msgs;  in otx2_queue_vf_work()
    330  queue_work(mbox_wq, &mw[i].mbox_up_wrk);  in otx2_queue_vf_work()
    964  struct mbox *mw = &pf->mbox;  in otx2_pfaf_mbox_intr_handler() local
    980  mbox = &mw->mbox_up;  in otx2_pfaf_mbox_intr_handler()
    986  queue_work(pf->mbox_wq, &mw->mbox_up_wrk);  in otx2_pfaf_mbox_intr_handler()
    996  mbox = &mw->mbox;  in otx2_pfaf_mbox_intr_handler()
    [all …]
|