Lines matching refs:rvu in drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c (leading numbers are source line numbers; the trailing "in func()" note names the enclosing function)
26 *otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
31 &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
37 trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \
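The three fragments above come from the M() macro that generates one typed allocator per AF-to-PF "up" mailbox message. As a hedged illustration only: assuming the usual M(CGX_LINK_EVENT, ..., cgx_link_event, cgx_link_info_msg, msg_rsp) entry in the driver's mbox.h, the expansion used later by cgx_notify_pfs() would look roughly like this (the header-field assignments are inferred from the macro's shape, not shown in this listing):

    static struct cgx_link_info_msg *
    otx2_mbox_alloc_msg_cgx_link_event(struct rvu *rvu, int devid)
    {
            struct cgx_link_info_msg *req;

            /* Allocate a request/response slot in the AF->PF "up" mailbox
             * belonging to PF 'devid'.
             */
            req = (struct cgx_link_info_msg *)otx2_mbox_alloc_msg_rsp(
                    &rvu->afpf_wq_info.mbox_up, devid,
                    sizeof(struct cgx_link_info_msg), sizeof(struct msg_rsp));
            if (!req)
                    return NULL;
            /* Stamp the header and trace the allocation, per the macro body */
            req->hdr.sig = OTX2_MBOX_REQ_SIG;
            req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
            trace_otx2_msg_alloc(rvu->pdev, MBOX_MSG_CGX_LINK_EVENT, sizeof(*req));
            return req;
    }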
44 bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature) in is_mac_feature_supported() argument
49 if (!is_pf_cgxmapped(rvu, pf)) in is_mac_feature_supported()
52 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in is_mac_feature_supported()
53 cgxd = rvu_cgx_pdata(cgx_id, rvu); in is_mac_feature_supported()
58 #define CGX_OFFSET(x) ((x) * rvu->hw->lmac_per_cgx)
60 static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) in cgxlmac_to_pfmap() argument
62 return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id]; in cgxlmac_to_pfmap()
65 int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id) in cgxlmac_to_pf() argument
69 pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id); in cgxlmac_to_pf()
76 rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx); in cgxlmac_to_pf()
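cgxlmac2pf_map (allocated in rvu_map_cgx_lmac_pf() below) keeps one u64 per (cgx, lmac) slot whose set bits name the PFs mapped to that port; cgxlmac_to_pf() reduces it with find_first_bit() on the assumption that exactly one PF maps to each port. A self-contained userspace sketch of that lookup, with hypothetical names and an illustrative LMAC count:

    #include <stdint.h>
    #include <stdio.h>

    #define LMAC_PER_CGX 4  /* illustrative; the driver reads this from HW */

    /* One u64 bitmask of PF ids per (cgx, lmac) slot, like cgxlmac2pf_map */
    static uint64_t cgxlmac2pf_map[2 * LMAC_PER_CGX];

    static int cgxlmac_to_pf(int cgx_id, int lmac_id)
    {
            uint64_t pfmap = cgxlmac2pf_map[cgx_id * LMAC_PER_CGX + lmac_id];

            if (!pfmap)
                    return -1;                     /* stands in for -ENODEV */
            return __builtin_ctzll(pfmap);         /* stands in for find_first_bit() */
    }

    int main(void)
    {
            cgxlmac2pf_map[1 * LMAC_PER_CGX + 2] = 1ULL << 5; /* PF5 owns cgx1/lmac2 */
            printf("pf = %d\n", cgxlmac_to_pf(1, 2));         /* prints: pf = 5 */
            return 0;
    }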
84 void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu) in rvu_cgx_pdata() argument
86 if (cgx_id >= rvu->cgx_cnt_max) in rvu_cgx_pdata()
89 return rvu->cgx_idmap[cgx_id]; in rvu_cgx_pdata()
93 void *rvu_first_cgx_pdata(struct rvu *rvu) in rvu_first_cgx_pdata() argument
98 for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) { in rvu_first_cgx_pdata()
99 cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu); in rvu_first_cgx_pdata()
108 static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf, in rvu_map_cgx_nix_block() argument
111 struct rvu_pfvf *pfvf = &rvu->pf[pf]; in rvu_map_cgx_nix_block()
121 static int rvu_map_cgx_lmac_pf(struct rvu *rvu) in rvu_map_cgx_lmac_pf() argument
123 struct npc_pkind *pkind = &rvu->hw->pkind; in rvu_map_cgx_lmac_pf()
124 int cgx_cnt_max = rvu->cgx_cnt_max; in rvu_map_cgx_lmac_pf()
134 if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF) in rvu_map_cgx_lmac_pf()
141 size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8); in rvu_map_cgx_lmac_pf()
142 rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL); in rvu_map_cgx_lmac_pf()
143 if (!rvu->pf2cgxlmac_map) in rvu_map_cgx_lmac_pf()
147 memset(rvu->pf2cgxlmac_map, 0xFF, size); in rvu_map_cgx_lmac_pf()
150 rvu->cgxlmac2pf_map = in rvu_map_cgx_lmac_pf()
151 devm_kzalloc(rvu->dev, in rvu_map_cgx_lmac_pf()
152 cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64), in rvu_map_cgx_lmac_pf()
154 if (!rvu->cgxlmac2pf_map) in rvu_map_cgx_lmac_pf()
157 rvu->cgx_mapped_pfs = 0; in rvu_map_cgx_lmac_pf()
159 if (!rvu_cgx_pdata(cgx, rvu)) in rvu_map_cgx_lmac_pf()
161 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); in rvu_map_cgx_lmac_pf()
162 for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { in rvu_map_cgx_lmac_pf()
163 lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), in rvu_map_cgx_lmac_pf()
165 rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac); in rvu_map_cgx_lmac_pf()
166 rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf; in rvu_map_cgx_lmac_pf()
169 rvu_map_cgx_nix_block(rvu, pf, cgx, lmac); in rvu_map_cgx_lmac_pf()
170 rvu->cgx_mapped_pfs++; in rvu_map_cgx_lmac_pf()
171 rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs); in rvu_map_cgx_lmac_pf()
172 rvu->cgx_mapped_vfs += numvfs; in rvu_map_cgx_lmac_pf()
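The 0xF bounds check and the memset to 0xFF above imply the packing contract: pf2cgxlmac_map stores one byte per PF, with the CGX id and LMAC id each limited to 4 bits and 0xFF marking an unmapped PF. A self-contained, compilable sketch of the assumed cgxlmac_id_to_bmap()/rvu_get_cgx_lmac_id() pair (the nibble order is an assumption):

    #include <assert.h>
    #include <stdint.h>

    /* Assumed layout: CGX id in the high nibble, LMAC id in the low nibble */
    static uint8_t cgxlmac_id_to_bmap(uint8_t cgx_id, uint8_t lmac_id)
    {
            return (uint8_t)(((cgx_id & 0xF) << 4) | (lmac_id & 0xF));
    }

    static void rvu_get_cgx_lmac_id(uint8_t map, uint8_t *cgx_id, uint8_t *lmac_id)
    {
            *cgx_id = map >> 4;
            *lmac_id = map & 0xF;
    }

    int main(void)
    {
            uint8_t cgx, lmac;

            /* Round-trip: packing then unpacking recovers both ids */
            rvu_get_cgx_lmac_id(cgxlmac_id_to_bmap(3, 9), &cgx, &lmac);
            assert(cgx == 3 && lmac == 9);
            return 0;
    }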
179 static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu) in rvu_cgx_send_link_info() argument
190 spin_lock_irqsave(&rvu->cgx_evq_lock, flags); in rvu_cgx_send_link_info()
191 err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id, in rvu_cgx_send_link_info()
199 list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head); in rvu_cgx_send_link_info()
201 spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags); in rvu_cgx_send_link_info()
204 queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work); in rvu_cgx_send_link_info()
213 struct rvu *rvu = data; in cgx_lmac_postevent() local
220 spin_lock(&rvu->cgx_evq_lock); in cgx_lmac_postevent()
221 list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head); in cgx_lmac_postevent()
222 spin_unlock(&rvu->cgx_evq_lock); in cgx_lmac_postevent()
225 queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work); in cgx_lmac_postevent()
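cgx_lmac_postevent() is the callback handed to the CGX driver via cb.data = rvu (line 310 below); it can fire from atomic context, so the assumed full body allocates the queue entry with GFP_ATOMIC and defers all real work to the workqueue:

    static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
    {
            struct cgx_evq_entry *qentry;
            struct rvu *rvu = data;

            /* May run in interrupt context: no sleeping allocation */
            qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
            if (!qentry)
                    return -ENOMEM;
            qentry->link_event = *event;
            spin_lock(&rvu->cgx_evq_lock);
            list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
            spin_unlock(&rvu->cgx_evq_lock);

            /* Kick the worker; processing happens in cgx_evhandler_task() */
            queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

            return 0;
    }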
230 static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) in cgx_notify_pfs() argument
238 pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id); in cgx_notify_pfs()
242 rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx); in cgx_notify_pfs()
246 if (!test_bit(pfid, &rvu->pf_notify_bmap)) { in cgx_notify_pfs()
247 dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n", in cgx_notify_pfs()
254 msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid); in cgx_notify_pfs()
258 otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid); in cgx_notify_pfs()
259 err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid); in cgx_notify_pfs()
261 dev_warn(rvu->dev, "notification to pf %d failed\n", in cgx_notify_pfs()
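cgx_notify_pfs() fans a link event out to every PF whose bit is set in the port's pfmap, skipping PFs that never enabled link events (pf_notify_bmap, armed by rvu_cgx_config_linkevents() further down). Assumed shape of the loop around the fragments above (linfo pointing into the event is an assumption):

    struct cgx_link_user_info *linfo = &event->link_uinfo;
    struct cgx_link_info_msg *msg;
    unsigned long pfmap;
    int pfid, err;

    pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
    do {
            pfid = find_first_bit(&pfmap,
                                  rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
            clear_bit(pfid, &pfmap);

            /* PF did not subscribe to link events: just log it */
            if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
                    dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
                             event->cgx_id, event->lmac_id,
                             linfo->link_up ? "UP" : "DOWN");
                    continue;
            }

            /* Send an "up" mbox message to the PF and wait for its ack */
            msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
            if (!msg)
                    continue;
            msg->link_info = *linfo;
            otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
            err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
            if (err)
                    dev_warn(rvu->dev, "notification to pf %d failed\n",
                             pfid);
    } while (pfmap);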
268 struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work); in cgx_evhandler_task() local
275 spin_lock_irqsave(&rvu->cgx_evq_lock, flags); in cgx_evhandler_task()
276 qentry = list_first_entry_or_null(&rvu->cgx_evq_head, in cgx_evhandler_task()
281 spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags); in cgx_evhandler_task()
288 cgx_notify_pfs(event, rvu); in cgx_evhandler_task()
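The worker drains the queue one entry at a time, holding cgx_evq_lock only across the dequeue so that cgx_notify_pfs(), which may sleep in otx2_mbox_wait_for_rsp(), runs unlocked. Assumed shape of cgx_evhandler_task() around the fragments above:

    struct cgx_evq_entry *qentry;
    struct cgx_link_event *event;
    unsigned long flags;

    do {
            /* Dequeue under the lock, process outside it */
            spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
            qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
                                              struct cgx_evq_entry,
                                              evq_node);
            if (qentry)
                    list_del(&qentry->evq_node);
            spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
            if (!qentry)
                    break; /* queue fully drained */

            event = &qentry->link_event;
            cgx_notify_pfs(event, rvu);
            kfree(qentry);
    } while (1);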
293 static int cgx_lmac_event_handler_init(struct rvu *rvu) in cgx_lmac_event_handler_init() argument
300 spin_lock_init(&rvu->cgx_evq_lock); in cgx_lmac_event_handler_init()
301 INIT_LIST_HEAD(&rvu->cgx_evq_head); in cgx_lmac_event_handler_init()
302 INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task); in cgx_lmac_event_handler_init()
303 rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0); in cgx_lmac_event_handler_init()
304 if (!rvu->cgx_evh_wq) { in cgx_lmac_event_handler_init()
305 dev_err(rvu->dev, "alloc workqueue failed"); in cgx_lmac_event_handler_init()
310 cb.data = rvu; in cgx_lmac_event_handler_init()
312 for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { in cgx_lmac_event_handler_init()
313 cgxd = rvu_cgx_pdata(cgx, rvu); in cgx_lmac_event_handler_init()
317 for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) { in cgx_lmac_event_handler_init()
320 dev_err(rvu->dev, in cgx_lmac_event_handler_init()
329 static void rvu_cgx_wq_destroy(struct rvu *rvu) in rvu_cgx_wq_destroy() argument
331 if (rvu->cgx_evh_wq) { in rvu_cgx_wq_destroy()
332 destroy_workqueue(rvu->cgx_evh_wq); in rvu_cgx_wq_destroy()
333 rvu->cgx_evh_wq = NULL; in rvu_cgx_wq_destroy()
337 int rvu_cgx_init(struct rvu *rvu) in rvu_cgx_init() argument
345 rvu->cgx_cnt_max = cgx_get_cgxcnt_max(); in rvu_cgx_init()
346 if (!rvu->cgx_cnt_max) { in rvu_cgx_init()
347 dev_info(rvu->dev, "No CGX devices found!\n"); in rvu_cgx_init()
351 rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max * in rvu_cgx_init()
353 if (!rvu->cgx_idmap) in rvu_cgx_init()
357 for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) in rvu_cgx_init()
358 rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx); in rvu_cgx_init()
361 err = rvu_map_cgx_lmac_pf(rvu); in rvu_cgx_init()
366 err = cgx_lmac_event_handler_init(rvu); in rvu_cgx_init()
370 mutex_init(&rvu->cgx_cfg_lock); in rvu_cgx_init()
378 for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { in rvu_cgx_init()
379 cgxd = rvu_cgx_pdata(cgx, rvu); in rvu_cgx_init()
384 dev_err(rvu->dev, in rvu_cgx_init()
392 int rvu_cgx_exit(struct rvu *rvu) in rvu_cgx_exit() argument
398 for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { in rvu_cgx_exit()
399 cgxd = rvu_cgx_pdata(cgx, rvu); in rvu_cgx_exit()
403 for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) in rvu_cgx_exit()
410 rvu_cgx_wq_destroy(rvu); in rvu_cgx_exit()
418 inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) in is_cgx_config_permitted() argument
421 !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) in is_cgx_config_permitted()
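is_cgx_config_permitted() gates most of the mbox handlers in this file. The fragment above shows the second half of its condition; the assumed full check (consistent with the RVU_PFVF_FUNC_MASK test visible near line 1077) rejects VFs outright and any PF without a CGX mapping:

    inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
    {
            /* Only a CGX-mapped PF may touch CGX/RPM config; a non-zero
             * function part in pcifunc means the caller is a VF.
             */
            if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
                !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
                    return false;
            return true;
    }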
426 void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable) in rvu_cgx_enadis_rx_bp() argument
432 if (!is_pf_cgxmapped(rvu, pf)) in rvu_cgx_enadis_rx_bp()
435 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_cgx_enadis_rx_bp()
436 cgxd = rvu_cgx_pdata(cgx_id, rvu); in rvu_cgx_enadis_rx_bp()
446 int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) in rvu_cgx_config_rxtx() argument
453 if (!is_cgx_config_permitted(rvu, pcifunc)) in rvu_cgx_config_rxtx()
456 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_cgx_config_rxtx()
457 cgxd = rvu_cgx_pdata(cgx_id, rvu); in rvu_cgx_config_rxtx()
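rvu_cgx_config_rxtx() shows the lookup chain repeated throughout this file: pcifunc -> PF number -> packed byte in pf2cgxlmac_map -> (cgx_id, lmac_id) -> cgx device -> mac_ops. A hedged sketch of the assumed full function (the mac_rx_tx_enable op and the exact error code are assumptions):

    int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
    {
            int pf = rvu_get_pf(pcifunc);
            struct mac_ops *mac_ops;
            u8 cgx_id, lmac_id;
            void *cgxd;

            if (!is_cgx_config_permitted(rvu, pcifunc))
                    return -EPERM; /* driver may use an LMAC_AF_ERR_* code */

            /* pcifunc -> PF -> packed map byte -> (cgx, lmac) -> device */
            rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
            cgxd = rvu_cgx_pdata(cgx_id, rvu);
            mac_ops = get_mac_ops(cgxd);

            /* Start or stop both RX and TX on this LMAC */
            return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
    }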
471 void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc) in rvu_cgx_disable_dmac_entries() argument
480 if (!is_cgx_config_permitted(rvu, pcifunc)) in rvu_cgx_disable_dmac_entries()
483 if (rvu_npc_exact_has_match_table(rvu)) { in rvu_cgx_disable_dmac_entries()
484 rvu_npc_exact_reset(rvu, pcifunc); in rvu_cgx_disable_dmac_entries()
488 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_cgx_disable_dmac_entries()
507 int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_start_rxtx() argument
510 rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true); in rvu_mbox_handler_cgx_start_rxtx()
514 int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_stop_rxtx() argument
517 rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false); in rvu_mbox_handler_cgx_stop_rxtx()
521 static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req, in rvu_lmac_get_stats() argument
531 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) in rvu_lmac_get_stats()
534 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); in rvu_lmac_get_stats()
535 cgxd = rvu_cgx_pdata(cgx_idx, rvu); in rvu_lmac_get_stats()
565 int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_stats() argument
568 return rvu_lmac_get_stats(rvu, req, (void *)rsp); in rvu_mbox_handler_cgx_stats()
571 int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_rpm_stats() argument
574 return rvu_lmac_get_stats(rvu, req, (void *)rsp); in rvu_mbox_handler_rpm_stats()
577 int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu, in rvu_mbox_handler_cgx_fec_stats() argument
586 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) in rvu_mbox_handler_cgx_fec_stats()
588 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); in rvu_mbox_handler_cgx_fec_stats()
590 cgxd = rvu_cgx_pdata(cgx_idx, rvu); in rvu_mbox_handler_cgx_fec_stats()
595 int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, in rvu_mbox_handler_cgx_mac_addr_set() argument
602 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) in rvu_mbox_handler_cgx_mac_addr_set()
605 if (rvu_npc_exact_has_match_table(rvu)) in rvu_mbox_handler_cgx_mac_addr_set()
606 return rvu_npc_exact_mac_addr_set(rvu, req, rsp); in rvu_mbox_handler_cgx_mac_addr_set()
608 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_mac_addr_set()
615 int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu, in rvu_mbox_handler_cgx_mac_addr_add() argument
623 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) in rvu_mbox_handler_cgx_mac_addr_add()
626 if (rvu_npc_exact_has_match_table(rvu)) in rvu_mbox_handler_cgx_mac_addr_add()
627 return rvu_npc_exact_mac_addr_add(rvu, req, rsp); in rvu_mbox_handler_cgx_mac_addr_add()
629 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_mac_addr_add()
639 int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu, in rvu_mbox_handler_cgx_mac_addr_del() argument
646 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) in rvu_mbox_handler_cgx_mac_addr_del()
649 if (rvu_npc_exact_has_match_table(rvu)) in rvu_mbox_handler_cgx_mac_addr_del()
650 return rvu_npc_exact_mac_addr_del(rvu, req, rsp); in rvu_mbox_handler_cgx_mac_addr_del()
652 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_mac_addr_del()
656 int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu, in rvu_mbox_handler_cgx_mac_max_entries_get() argument
668 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) { in rvu_mbox_handler_cgx_mac_max_entries_get()
673 if (rvu_npc_exact_has_match_table(rvu)) { in rvu_mbox_handler_cgx_mac_max_entries_get()
674 rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu); in rvu_mbox_handler_cgx_mac_max_entries_get()
678 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_mac_max_entries_get()
683 int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, in rvu_mbox_handler_cgx_mac_addr_get() argument
692 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) in rvu_mbox_handler_cgx_mac_addr_get()
695 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_mac_addr_get()
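The DMAC-filter handlers above (set/add/del/max-entries/get) share one dispatch rule: when the NPC exact-match table is present it owns MAC filtering, so the request is delegated wholesale to the rvu_npc_exact_* implementation; only otherwise is the CGX/RPM hardware filter programmed directly. Sketch of the pattern using mac_addr_set, where cgx_lmac_addr_set() as the fallback is an assumption:

    int pf = rvu_get_pf(req->hdr.pcifunc);
    u8 cgx_id, lmac_id;

    if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
            return -EPERM;

    /* Exact-match table, if enabled, owns DMAC filtering entirely */
    if (rvu_npc_exact_has_match_table(rvu))
            return rvu_npc_exact_mac_addr_set(rvu, req, rsp);

    /* Otherwise program the plain CGX/RPM DMAC filter */
    rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
    cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);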
705 int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_promisc_enable() argument
712 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) in rvu_mbox_handler_cgx_promisc_enable()
716 if (rvu_npc_exact_has_match_table(rvu)) in rvu_mbox_handler_cgx_promisc_enable()
717 return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc); in rvu_mbox_handler_cgx_promisc_enable()
719 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_promisc_enable()
725 int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_promisc_disable() argument
731 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) in rvu_mbox_handler_cgx_promisc_disable()
735 if (rvu_npc_exact_has_match_table(rvu)) in rvu_mbox_handler_cgx_promisc_disable()
736 return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc); in rvu_mbox_handler_cgx_promisc_disable()
738 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_promisc_disable()
744 static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) in rvu_cgx_ptp_rx_cfg() argument
746 struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); in rvu_cgx_ptp_rx_cfg()
752 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) in rvu_cgx_ptp_rx_cfg()
759 !is_pf_cgxmapped(rvu, pf)) in rvu_cgx_ptp_rx_cfg()
762 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_cgx_ptp_rx_cfg()
763 cgxd = rvu_cgx_pdata(cgx_id, rvu); in rvu_cgx_ptp_rx_cfg()
771 if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable)) in rvu_cgx_ptp_rx_cfg()
779 int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_ptp_rx_enable() argument
782 if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc))) in rvu_mbox_handler_cgx_ptp_rx_enable()
785 return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true); in rvu_mbox_handler_cgx_ptp_rx_enable()
788 int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_ptp_rx_disable() argument
791 return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false); in rvu_mbox_handler_cgx_ptp_rx_disable()
794 static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en) in rvu_cgx_config_linkevents() argument
799 if (!is_cgx_config_permitted(rvu, pcifunc)) in rvu_cgx_config_linkevents()
802 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_cgx_config_linkevents()
805 set_bit(pf, &rvu->pf_notify_bmap); in rvu_cgx_config_linkevents()
807 rvu_cgx_send_link_info(cgx_id, lmac_id, rvu); in rvu_cgx_config_linkevents()
809 clear_bit(pf, &rvu->pf_notify_bmap); in rvu_cgx_config_linkevents()
815 int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_start_linkevents() argument
818 rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true); in rvu_mbox_handler_cgx_start_linkevents()
822 int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_stop_linkevents() argument
825 rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false); in rvu_mbox_handler_cgx_stop_linkevents()
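rvu_cgx_config_linkevents() arms the per-PF gate that cgx_notify_pfs() consults: enabling sets the PF's bit in pf_notify_bmap and immediately replays the current link state via rvu_cgx_send_link_info(), so a PF that subscribes late still learns the present status. Assumed core of the function (declarations omitted):

    rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

    if (en) {
            set_bit(pf, &rvu->pf_notify_bmap);
            /* Enable events and push the current link state once */
            rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
    } else {
            clear_bit(pf, &rvu->pf_notify_bmap);
    }

    return 0;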
829 int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_get_linkinfo() argument
837 if (!is_pf_cgxmapped(rvu, pf)) in rvu_mbox_handler_cgx_get_linkinfo()
840 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_get_linkinfo()
842 err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id, in rvu_mbox_handler_cgx_get_linkinfo()
847 int rvu_mbox_handler_cgx_features_get(struct rvu *rvu, in rvu_mbox_handler_cgx_features_get() argument
855 if (!is_pf_cgxmapped(rvu, pf)) in rvu_mbox_handler_cgx_features_get()
858 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); in rvu_mbox_handler_cgx_features_get()
859 cgxd = rvu_cgx_pdata(cgx_idx, rvu); in rvu_mbox_handler_cgx_features_get()
865 u32 rvu_cgx_get_fifolen(struct rvu *rvu) in rvu_cgx_get_fifolen() argument
870 mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); in rvu_cgx_get_fifolen()
876 u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac) in rvu_cgx_get_lmac_fifolen() argument
881 cgxd = rvu_cgx_pdata(cgx, rvu); in rvu_cgx_get_lmac_fifolen()
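Both FIFO-length helpers go through mac_ops so that CGX and RPM silicon each report their own value. A hedged sketch of rvu_cgx_get_fifolen(), assuming mac_ops carries a fifo_len field:

    u32 rvu_cgx_get_fifolen(struct rvu *rvu)
    {
            struct mac_ops *mac_ops;

            mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));

            /* No CGX/RPM probed: report 0 rather than dereference NULL */
            return mac_ops ? mac_ops->fifo_len : 0;
    }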
892 static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en) in rvu_cgx_config_intlbk() argument
898 if (!is_cgx_config_permitted(rvu, pcifunc)) in rvu_cgx_config_intlbk()
901 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_cgx_config_intlbk()
902 mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu)); in rvu_cgx_config_intlbk()
904 return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu), in rvu_cgx_config_intlbk()
908 int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_intlbk_enable() argument
911 rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true); in rvu_mbox_handler_cgx_intlbk_enable()
915 int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_intlbk_disable() argument
918 rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false); in rvu_mbox_handler_cgx_intlbk_disable()
922 int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause) in rvu_cgx_cfg_pause_frm() argument
930 if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC)) in rvu_cgx_cfg_pause_frm()
936 if (!is_pf_cgxmapped(rvu, pf)) in rvu_cgx_cfg_pause_frm()
939 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_cgx_cfg_pause_frm()
940 cgxd = rvu_cgx_pdata(cgx_id, rvu); in rvu_cgx_cfg_pause_frm()
945 dev_warn(rvu->dev, in rvu_cgx_cfg_pause_frm()
950 mutex_lock(&rvu->rsrc_lock); in rvu_cgx_cfg_pause_frm()
953 mutex_unlock(&rvu->rsrc_lock); in rvu_cgx_cfg_pause_frm()
956 mutex_unlock(&rvu->rsrc_lock); in rvu_cgx_cfg_pause_frm()
961 int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu, in rvu_mbox_handler_cgx_cfg_pause_frm() argument
974 if (!is_pf_cgxmapped(rvu, pf)) in rvu_mbox_handler_cgx_cfg_pause_frm()
977 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_cfg_pause_frm()
978 cgxd = rvu_cgx_pdata(cgx_id, rvu); in rvu_mbox_handler_cgx_cfg_pause_frm()
982 err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause); in rvu_mbox_handler_cgx_cfg_pause_frm()
989 int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_get_phy_fec_stats() argument
995 if (!is_pf_cgxmapped(rvu, pf)) in rvu_mbox_handler_cgx_get_phy_fec_stats()
998 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_get_phy_fec_stats()
999 return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id); in rvu_mbox_handler_cgx_get_phy_fec_stats()
1005 int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, in rvu_cgx_nix_cuml_stats() argument
1015 if (!cgxd || !rvu) in rvu_cgx_nix_cuml_stats()
1018 pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id); in rvu_cgx_nix_cuml_stats()
1026 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); in rvu_cgx_nix_cuml_stats()
1029 block = &rvu->hw->block[blkaddr]; in rvu_cgx_nix_cuml_stats()
1037 *stat += rvu_read64(rvu, blkaddr, in rvu_cgx_nix_cuml_stats()
1040 *stat += rvu_read64(rvu, blkaddr, in rvu_cgx_nix_cuml_stats()
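rvu_cgx_nix_cuml_stats() accumulates a single RX or TX statistic across every NIX LF owned by the CGX-mapped PF and its VFs; the two rvu_read64() fragments above are the per-LF reads. Assumed shape of the accumulation loop (the NIX_AF_LFX_*_STATX register macros and the fn_map ownership test are inferred from the visible fragments):

    for (lf = 0; lf < block->lf.max; lf++) {
            /* Count only LFs attached to this PF or one of its VFs:
             * masking off the function bits compares the PF part alone.
             */
            if ((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) !=
                (pcifunc & ~RVU_PFVF_FUNC_MASK))
                    continue;
            if (rx_stat)
                    *stat += rvu_read64(rvu, blkaddr,
                                        NIX_AF_LFX_RX_STATX(lf, index));
            else
                    *stat += rvu_read64(rvu, blkaddr,
                                        NIX_AF_LFX_TX_STATX(lf, index));
    }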
1047 int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start) in rvu_cgx_start_stop_io() argument
1052 if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) in rvu_cgx_start_stop_io()
1055 parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; in rvu_cgx_start_stop_io()
1056 pfvf = rvu_get_pfvf(rvu, pcifunc); in rvu_cgx_start_stop_io()
1058 mutex_lock(&rvu->cgx_cfg_lock); in rvu_cgx_start_stop_io()
1077 err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK, in rvu_cgx_start_stop_io()
1080 dev_err(rvu->dev, "Unable to %s CGX\n", in rvu_cgx_start_stop_io()
1090 mutex_unlock(&rvu->cgx_cfg_lock); in rvu_cgx_start_stop_io()
1094 int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu, in rvu_mbox_handler_cgx_set_fec_param() argument
1101 if (!is_pf_cgxmapped(rvu, pf)) in rvu_mbox_handler_cgx_set_fec_param()
1106 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_set_fec_param()
1111 int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req, in rvu_mbox_handler_cgx_get_aux_link_info() argument
1117 if (!rvu->fwdata) in rvu_mbox_handler_cgx_get_aux_link_info()
1120 if (!is_pf_cgxmapped(rvu, pf)) in rvu_mbox_handler_cgx_get_aux_link_info()
1123 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_get_aux_link_info()
1125 if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX) in rvu_mbox_handler_cgx_get_aux_link_info()
1127 &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id], in rvu_mbox_handler_cgx_get_aux_link_info()
1131 &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id], in rvu_mbox_handler_cgx_get_aux_link_info()
1137 int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu, in rvu_mbox_handler_cgx_set_link_mode() argument
1145 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) in rvu_mbox_handler_cgx_set_link_mode()
1148 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); in rvu_mbox_handler_cgx_set_link_mode()
1149 cgxd = rvu_cgx_pdata(cgx_idx, rvu); in rvu_mbox_handler_cgx_set_link_mode()
1154 int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req, in rvu_mbox_handler_cgx_mac_addr_reset() argument
1160 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) in rvu_mbox_handler_cgx_mac_addr_reset()
1163 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_mac_addr_reset()
1165 if (rvu_npc_exact_has_match_table(rvu)) in rvu_mbox_handler_cgx_mac_addr_reset()
1166 return rvu_npc_exact_mac_addr_reset(rvu, req, rsp); in rvu_mbox_handler_cgx_mac_addr_reset()
1171 int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, in rvu_mbox_handler_cgx_mac_addr_update() argument
1178 if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) in rvu_mbox_handler_cgx_mac_addr_update()
1181 if (rvu_npc_exact_has_match_table(rvu)) in rvu_mbox_handler_cgx_mac_addr_update()
1182 return rvu_npc_exact_mac_addr_update(rvu, req, rsp); in rvu_mbox_handler_cgx_mac_addr_update()
1184 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_mac_addr_update()
1188 int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, in rvu_cgx_prio_flow_ctrl_cfg() argument
1200 if (!is_pf_cgxmapped(rvu, pf)) in rvu_cgx_prio_flow_ctrl_cfg()
1203 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_cgx_prio_flow_ctrl_cfg()
1204 cgxd = rvu_cgx_pdata(cgx_id, rvu); in rvu_cgx_prio_flow_ctrl_cfg()
1209 dev_warn(rvu->dev, in rvu_cgx_prio_flow_ctrl_cfg()
1214 mutex_lock(&rvu->rsrc_lock); in rvu_cgx_prio_flow_ctrl_cfg()
1217 mutex_unlock(&rvu->rsrc_lock); in rvu_cgx_prio_flow_ctrl_cfg()
1220 mutex_unlock(&rvu->rsrc_lock); in rvu_cgx_prio_flow_ctrl_cfg()
1225 int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, in rvu_mbox_handler_cgx_prio_flow_ctrl_cfg() argument
1238 if (!is_pf_cgxmapped(rvu, pf)) in rvu_mbox_handler_cgx_prio_flow_ctrl_cfg()
1241 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); in rvu_mbox_handler_cgx_prio_flow_ctrl_cfg()
1242 cgxd = rvu_cgx_pdata(cgx_id, rvu); in rvu_mbox_handler_cgx_prio_flow_ctrl_cfg()
1245 err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause, in rvu_mbox_handler_cgx_prio_flow_ctrl_cfg()