Lines Matching refs:adev

36 static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev) in xgpu_nv_mailbox_send_ack() argument
41 static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val) in xgpu_nv_mailbox_set_valid() argument
55 static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev) in xgpu_nv_mailbox_peek_msg() argument
61 static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev, in xgpu_nv_mailbox_rcv_msg() argument
75 xgpu_nv_mailbox_send_ack(adev); in xgpu_nv_mailbox_rcv_msg()
80 static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev) in xgpu_nv_peek_ack() argument
85 static int xgpu_nv_poll_ack(struct amdgpu_device *adev) in xgpu_nv_poll_ack() argument
99 dev_err(adev->dev, "Doesn't get TRN_MSG_ACK from pf in %d msec \n", NV_MAILBOX_POLL_ACK_TIMEDOUT); in xgpu_nv_poll_ack()
104 static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event) in xgpu_nv_poll_msg() argument
108 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); in xgpu_nv_poll_msg()
114 r = xgpu_nv_mailbox_rcv_msg(adev, event); in xgpu_nv_poll_msg()
116 dev_dbg(adev->dev, "rcv_msg 0x%x after %llu ms\n", in xgpu_nv_poll_msg()
120 if (!amdgpu_ras_is_rma(adev)) { in xgpu_nv_poll_msg()
122 dev_err(adev->dev, "VF is in an unrecoverable state. " in xgpu_nv_poll_msg()
132 dev_dbg(adev->dev, "nv_poll_msg timed out\n"); in xgpu_nv_poll_msg()
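
The xgpu_nv_poll_msg() hits above outline a poll-until-timeout helper: keep retrying xgpu_nv_mailbox_rcv_msg() for the expected event, report how long the wait took, and log once via the RAS context when the PF instead signals that the VF is unrecoverable. A minimal sketch of the basic loop, with the RAS/RMA handling left out (the sketch function name, the timeout constant, and the 10 ms step are assumptions, not the literal driver code):

/* Illustrative only: poll for a specific event until a timeout expires. */
static int poll_msg_sketch(struct amdgpu_device *adev, enum idh_event event)
{
        int timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;     /* constant name assumed */

        do {
                if (!xgpu_nv_mailbox_rcv_msg(adev, event))
                        return 0;               /* the expected message arrived */
                msleep(10);                     /* interval assumed */
                timeout -= 10;
        } while (timeout > 0);

        dev_dbg(adev->dev, "nv_poll_msg timed out\n");
        return -ETIME;
}
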
137 static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev, in xgpu_nv_mailbox_trans_msg() argument
150 xgpu_nv_mailbox_set_valid(adev, false); in xgpu_nv_mailbox_trans_msg()
151 trn = xgpu_nv_peek_ack(adev); in xgpu_nv_mailbox_trans_msg()
153 dev_err_ratelimited(adev->dev, "trn=%x ACK should not assert! wait again !\n", trn); in xgpu_nv_mailbox_trans_msg()
158 dev_dbg(adev->dev, "trans_msg req = 0x%x, data1 = 0x%x\n", req, data1); in xgpu_nv_mailbox_trans_msg()
163 xgpu_nv_mailbox_set_valid(adev, true); in xgpu_nv_mailbox_trans_msg()
166 r = xgpu_nv_poll_ack(adev); in xgpu_nv_mailbox_trans_msg()
168 dev_err(adev->dev, "Doesn't get ack from pf, continue\n"); in xgpu_nv_mailbox_trans_msg()
170 xgpu_nv_mailbox_set_valid(adev, false); in xgpu_nv_mailbox_trans_msg()
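
Read in order, the xgpu_nv_mailbox_trans_msg() hits describe the VF-to-PF handshake: drop the VALID bit, make sure no stale ACK is pending, write the request and payload words, raise VALID, wait for the PF's ACK, then drop VALID again. A hedged outline of that sequence (write_msg_regs() is a stand-in for the actual MMIO writes and is not a function in the driver; the real code also re-polls a stale ACK rather than only warning):

/* Outline of the VF -> PF mailbox handshake implied by the lines above. */
static void trans_msg_sketch(struct amdgpu_device *adev, enum idh_request req,
                             u32 data1, u32 data2, u32 data3)
{
        int r;

        xgpu_nv_mailbox_set_valid(adev, false);         /* start with VALID deasserted */

        if (xgpu_nv_peek_ack(adev))                     /* no stale ACK should be pending */
                dev_err_ratelimited(adev->dev, "stale ACK before sending request\n");

        write_msg_regs(adev, req, data1, data2, data3); /* hypothetical helper */

        xgpu_nv_mailbox_set_valid(adev, true);          /* tell the PF a message is ready */

        r = xgpu_nv_poll_ack(adev);                     /* wait for the PF acknowledgement */
        if (r)
                dev_err(adev->dev, "no ack from pf, continue\n");

        xgpu_nv_mailbox_set_valid(adev, false);         /* complete the handshake */
}
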
173 static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev, in xgpu_nv_send_access_requests_with_param() argument
181 if (amdgpu_ras_is_rma(adev)) in xgpu_nv_send_access_requests_with_param()
184 xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3); in xgpu_nv_send_access_requests_with_param()
213 r = xgpu_nv_poll_msg(adev, event); in xgpu_nv_send_access_requests_with_param()
219 dev_err(adev->dev, "Doesn't get msg:%d from pf, error=%d\n", event, r); in xgpu_nv_send_access_requests_with_param()
222 adev->virt.req_init_data_ver = 0; in xgpu_nv_send_access_requests_with_param()
225 adev->virt.req_init_data_ver = in xgpu_nv_send_access_requests_with_param()
229 if (adev->virt.req_init_data_ver < 1) in xgpu_nv_send_access_requests_with_param()
230 adev->virt.req_init_data_ver = 1; in xgpu_nv_send_access_requests_with_param()
236 adev->virt.fw_reserve.checksum_key = in xgpu_nv_send_access_requests_with_param()
244 static int xgpu_nv_send_access_requests(struct amdgpu_device *adev, in xgpu_nv_send_access_requests() argument
247 return xgpu_nv_send_access_requests_with_param(adev, in xgpu_nv_send_access_requests()
251 static int xgpu_nv_request_reset(struct amdgpu_device *adev) in xgpu_nv_request_reset() argument
256 ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS); in xgpu_nv_request_reset()
265 static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev, in xgpu_nv_request_full_gpu_access() argument
271 return xgpu_nv_send_access_requests(adev, req); in xgpu_nv_request_full_gpu_access()
274 static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev, in xgpu_nv_release_full_gpu_access() argument
281 r = xgpu_nv_send_access_requests(adev, req); in xgpu_nv_release_full_gpu_access()
286 static int xgpu_nv_request_init_data(struct amdgpu_device *adev) in xgpu_nv_request_init_data() argument
288 return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA); in xgpu_nv_request_init_data()
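
The wrappers above (request_reset, request/release_full_gpu_access, request_init_data) all reduce to picking an IDH_* request code and handing it to xgpu_nv_send_access_requests(), which the hit at line 247 shows is itself a thin shim over the parameterised path. A sketch of that layering (passing zero payload words in the shim is an assumption consistent with that call):

/* Layering sketch, not the literal driver code. */
static int send_access_requests_sketch(struct amdgpu_device *adev,
                                       enum idh_request req)
{
        /* assumed: plain requests carry no extra payload words */
        return xgpu_nv_send_access_requests_with_param(adev, req, 0, 0, 0);
}

static int request_reset_sketch(struct amdgpu_device *adev)
{
        return send_access_requests_sketch(adev, IDH_REQ_GPU_RESET_ACCESS);
}
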
291 static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev, in xgpu_nv_mailbox_ack_irq() argument
295 dev_dbg(adev->dev, "get ack intr and do nothing.\n"); in xgpu_nv_mailbox_ack_irq()
299 static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev, in xgpu_nv_set_mailbox_ack_irq() argument
316 static void xgpu_nv_ready_to_reset(struct amdgpu_device *adev) in xgpu_nv_ready_to_reset() argument
318 xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0); in xgpu_nv_ready_to_reset()
321 static int xgpu_nv_wait_reset(struct amdgpu_device *adev) in xgpu_nv_wait_reset() argument
325 if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) { in xgpu_nv_wait_reset()
326 …dev_dbg(adev->dev, "Got NV IDH_FLR_NOTIFICATION_CMPL after %d ms\n", NV_MAILBOX_POLL_FLR_TIMEDOUT … in xgpu_nv_wait_reset()
333 dev_dbg(adev->dev, "waiting NV IDH_FLR_NOTIFICATION_CMPL timeout\n"); in xgpu_nv_wait_reset()
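
xgpu_nv_wait_reset() above simply polls the mailbox for IDH_FLR_NOTIFICATION_CMPL until NV_MAILBOX_POLL_FLR_TIMEDOUT is exhausted. A minimal sketch of that loop (the 10 ms step and the return codes are assumptions):

/* Illustrative wait for the host to report FLR completion. */
static int wait_reset_sketch(struct amdgpu_device *adev)
{
        int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

        do {
                if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
                        return 0;       /* host finished the function level reset */
                msleep(10);
                timeout -= 10;
        } while (timeout > 0);

        dev_dbg(adev->dev, "waiting NV IDH_FLR_NOTIFICATION_CMPL timeout\n");
        return -ETIME;
}
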
340 struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); in xgpu_nv_mailbox_flr_work() local
343 amdgpu_virt_fini_data_exchange(adev); in xgpu_nv_mailbox_flr_work()
346 if (amdgpu_device_should_recover_gpu(adev) in xgpu_nv_mailbox_flr_work()
347 && (!amdgpu_device_has_job_running(adev) || in xgpu_nv_mailbox_flr_work()
348 adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT || in xgpu_nv_mailbox_flr_work()
349 adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT || in xgpu_nv_mailbox_flr_work()
350 adev->compute_timeout == MAX_SCHEDULE_TIMEOUT || in xgpu_nv_mailbox_flr_work()
351 adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) { in xgpu_nv_mailbox_flr_work()
354 reset_context.reset_req_dev = adev; in xgpu_nv_mailbox_flr_work()
358 amdgpu_device_gpu_recover(adev, NULL, &reset_context); in xgpu_nv_mailbox_flr_work()
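
Assembled from the scattered hits, xgpu_nv_mailbox_flr_work() first tears down the VF<->PF data exchange and then only enters full GPU recovery when recovery is allowed and either no job is running or every ring timeout is set to MAX_SCHEDULE_TIMEOUT. A condensed sketch (reset_context setup is abbreviated; only reset_req_dev appears in the hits above):

/* Condensed shape of the FLR worker implied by the lines above. */
static void flr_work_sketch(struct work_struct *work)
{
        struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
        struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
        struct amdgpu_reset_context reset_context;

        memset(&reset_context, 0, sizeof(reset_context));

        amdgpu_virt_fini_data_exchange(adev);   /* stop VF<->PF data exchange first */

        if (amdgpu_device_should_recover_gpu(adev) &&
            (!amdgpu_device_has_job_running(adev) ||
             adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
             adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
             adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
             adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
                reset_context.reset_req_dev = adev;
                amdgpu_device_gpu_recover(adev, NULL, &reset_context);
        }
}
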
365 struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); in xgpu_nv_mailbox_bad_pages_work() local
367 if (down_read_trylock(&adev->reset_domain->sem)) { in xgpu_nv_mailbox_bad_pages_work()
368 amdgpu_virt_fini_data_exchange(adev); in xgpu_nv_mailbox_bad_pages_work()
369 amdgpu_virt_request_bad_pages(adev); in xgpu_nv_mailbox_bad_pages_work()
370 amdgpu_virt_init_data_exchange(adev); in xgpu_nv_mailbox_bad_pages_work()
371 up_read(&adev->reset_domain->sem); in xgpu_nv_mailbox_bad_pages_work()
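
xgpu_nv_mailbox_bad_pages_work() only refreshes the bad-page list when it can take the reset domain semaphore for reading, i.e. when no reset currently owns it; it then pauses the data exchange, asks the host for retired pages, and restarts the exchange. The same shape as a sketch (the function name here is hypothetical):

/* Bad-pages refresh as implied above: skip it while a reset is in flight. */
static void bad_pages_refresh_sketch(struct amdgpu_device *adev)
{
        if (down_read_trylock(&adev->reset_domain->sem)) {
                amdgpu_virt_fini_data_exchange(adev);   /* quiesce VF<->PF data exchange */
                amdgpu_virt_request_bad_pages(adev);    /* ask the PF for retired pages */
                amdgpu_virt_init_data_exchange(adev);   /* restart the exchange */
                up_read(&adev->reset_domain->sem);
        }
}
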
375 static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev, in xgpu_nv_set_mailbox_rcv_irq() argument
392 static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev, in xgpu_nv_mailbox_rcv_irq() argument
396 enum idh_event event = xgpu_nv_mailbox_peek_msg(adev); in xgpu_nv_mailbox_rcv_irq()
397 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); in xgpu_nv_mailbox_rcv_irq()
401 xgpu_nv_mailbox_send_ack(adev); in xgpu_nv_mailbox_rcv_irq()
402 if (amdgpu_sriov_runtime(adev)) in xgpu_nv_mailbox_rcv_irq()
403 schedule_work(&adev->virt.bad_pages_work); in xgpu_nv_mailbox_rcv_irq()
406 xgpu_nv_mailbox_send_ack(adev); in xgpu_nv_mailbox_rcv_irq()
407 if (!amdgpu_ras_is_rma(adev)) { in xgpu_nv_mailbox_rcv_irq()
409 dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n"); in xgpu_nv_mailbox_rcv_irq()
412 if (amdgpu_sriov_runtime(adev)) in xgpu_nv_mailbox_rcv_irq()
413 WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain, in xgpu_nv_mailbox_rcv_irq()
414 &adev->virt.flr_work), in xgpu_nv_mailbox_rcv_irq()
419 if (amdgpu_sriov_runtime(adev)) in xgpu_nv_mailbox_rcv_irq()
420 WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain, in xgpu_nv_mailbox_rcv_irq()
421 &adev->virt.flr_work), in xgpu_nv_mailbox_rcv_irq()
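
The receive-interrupt hits show the dispatch pattern: peek the pending event, ACK the messages that need it, and defer the heavy work, scheduling bad_pages_work for bad-page notifications and handing FLR (and unrecoverable-error) notifications to the reset domain via flr_work. A skeleton of that dispatch; the case labels other than IDH_FLR_NOTIFICATION are assumed names, and the actions in each branch come from the hit lines above:

/* Dispatch skeleton for the receive interrupt (event names partly assumed). */
static void rcv_irq_dispatch_sketch(struct amdgpu_device *adev)
{
        switch (xgpu_nv_mailbox_peek_msg(adev)) {
        case IDH_RAS_BAD_PAGES_NOTIFICATION:            /* name assumed */
                xgpu_nv_mailbox_send_ack(adev);
                if (amdgpu_sriov_runtime(adev))
                        schedule_work(&adev->virt.bad_pages_work);
                break;
        case IDH_UNRECOV_FATAL_ERROR:                   /* name assumed */
                xgpu_nv_mailbox_send_ack(adev);
                if (!amdgpu_ras_is_rma(adev))
                        dev_err(adev->dev, "VF is in an unrecoverable state. Runtime Services are halted.\n");
                fallthrough;    /* per the hits, this path also schedules flr_work */
        case IDH_FLR_NOTIFICATION:
                if (amdgpu_sriov_runtime(adev))
                        WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
                                                                &adev->virt.flr_work),
                                  "Failed to queue FLR work\n");
                break;
        default:
                break;
        }
}
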
449 void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev) in xgpu_nv_mailbox_set_irq_funcs() argument
451 adev->virt.ack_irq.num_types = 1; in xgpu_nv_mailbox_set_irq_funcs()
452 adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs; in xgpu_nv_mailbox_set_irq_funcs()
453 adev->virt.rcv_irq.num_types = 1; in xgpu_nv_mailbox_set_irq_funcs()
454 adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs; in xgpu_nv_mailbox_set_irq_funcs()
457 int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev) in xgpu_nv_mailbox_add_irq_id() argument
461 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq); in xgpu_nv_mailbox_add_irq_id()
465 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq); in xgpu_nv_mailbox_add_irq_id()
467 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); in xgpu_nv_mailbox_add_irq_id()
474 int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev) in xgpu_nv_mailbox_get_irq() argument
478 r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0); in xgpu_nv_mailbox_get_irq()
481 r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0); in xgpu_nv_mailbox_get_irq()
483 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); in xgpu_nv_mailbox_get_irq()
487 INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work); in xgpu_nv_mailbox_get_irq()
488 INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work); in xgpu_nv_mailbox_get_irq()
493 void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev) in xgpu_nv_mailbox_put_irq() argument
495 amdgpu_irq_put(adev, &adev->virt.ack_irq, 0); in xgpu_nv_mailbox_put_irq()
496 amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); in xgpu_nv_mailbox_put_irq()
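
The last three groups of hits show how the mailbox interrupt plumbing pairs up: xgpu_nv_mailbox_add_irq_id() registers BIF sources 135 (receive) and 138 (ack), xgpu_nv_mailbox_get_irq() enables both and arms the FLR and bad-pages work items, and xgpu_nv_mailbox_put_irq() drops the references again. A condensed sketch of the get/put pairing (error handling shortened relative to the real functions):

/* Condensed init/teardown pairing implied by the hits above. */
static int mailbox_get_irq_sketch(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
        if (r)
                return r;
        r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
        }

        INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
        INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work);
        return 0;
}

static void mailbox_put_irq_sketch(struct amdgpu_device *adev)
{
        amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
        amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
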
499 static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev, in xgpu_nv_ras_poison_handler() argument
502 if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) { in xgpu_nv_ras_poison_handler()
503 xgpu_nv_send_access_requests(adev, IDH_RAS_POISON); in xgpu_nv_ras_poison_handler()
505 amdgpu_virt_fini_data_exchange(adev); in xgpu_nv_ras_poison_handler()
506 xgpu_nv_send_access_requests_with_param(adev, in xgpu_nv_ras_poison_handler()
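
xgpu_nv_ras_poison_handler() splits on the UMC IP version: parts older than 12.0.0 send a plain IDH_RAS_POISON request, newer parts first stop the data exchange and use the parameterised request. The hit lines truncate the argument list on the newer path, so passing the offending RAS block as the first payload word below is an assumption:

/* Version split in the poison handler; payload on the new path is assumed. */
static void ras_poison_sketch(struct amdgpu_device *adev,
                              enum amdgpu_ras_block block)
{
        if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
                xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
        } else {
                amdgpu_virt_fini_data_exchange(adev);
                xgpu_nv_send_access_requests_with_param(adev, IDH_RAS_POISON,
                                                        block, 0, 0);
        }
}
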
511 static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev) in xgpu_nv_rcvd_ras_intr() argument
513 enum idh_event msg = xgpu_nv_mailbox_peek_msg(adev); in xgpu_nv_rcvd_ras_intr()
518 static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev) in xgpu_nv_req_ras_err_count() argument
520 return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT); in xgpu_nv_req_ras_err_count()
523 static int xgpu_nv_req_ras_cper_dump(struct amdgpu_device *adev, u64 vf_rptr) in xgpu_nv_req_ras_cper_dump() argument
530 adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0); in xgpu_nv_req_ras_cper_dump()
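
xgpu_nv_req_ras_cper_dump() takes the VF's 64-bit CPER read pointer and, since each mailbox payload word is 32 bits wide, passes it as two halves. The upper/lower split below is an assumption consistent with the vf_rptr_hi/vf_rptr_lo names in the call above:

/* Assumed split of the 64-bit read pointer into two 32-bit payload words. */
static int req_ras_cper_dump_sketch(struct amdgpu_device *adev, u64 vf_rptr)
{
        u32 vf_rptr_hi = upper_32_bits(vf_rptr);
        u32 vf_rptr_lo = lower_32_bits(vf_rptr);

        return xgpu_nv_send_access_requests_with_param(
                adev, IDH_REQ_RAS_CPER_DUMP, vf_rptr_hi, vf_rptr_lo, 0);
}
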
533 static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev) in xgpu_nv_req_ras_bad_pages() argument
535 return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES); in xgpu_nv_req_ras_bad_pages()