Lines matching refs:accel_dev in the QAT device life-cycle code (adf_dev_init/start/stop/shutdown, the restart/error notifiers, and the adf_dev_up/down/restart wrappers)
64 static int adf_dev_init(struct adf_accel_dev *accel_dev) in adf_dev_init() argument
67 struct adf_hw_device_data *hw_data = accel_dev->hw_device; in adf_dev_init()
71 dev_err(&GET_DEV(accel_dev), in adf_dev_init()
76 if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) && in adf_dev_init()
77 !accel_dev->is_vf) { in adf_dev_init()
78 dev_err(&GET_DEV(accel_dev), "Device not configured\n"); in adf_dev_init()
82 if (adf_init_etr_data(accel_dev)) { in adf_dev_init()
83 dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n"); in adf_dev_init()
87 if (hw_data->init_device && hw_data->init_device(accel_dev)) { in adf_dev_init()
88 dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n"); in adf_dev_init()
92 if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) { in adf_dev_init()
93 dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n"); in adf_dev_init()
97 if (hw_data->init_arb && hw_data->init_arb(accel_dev)) { in adf_dev_init()
98 dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n"); in adf_dev_init()
103 hw_data->ring_to_svc_map = hw_data->get_ring_to_svc_map(accel_dev); in adf_dev_init()
105 if (adf_ae_init(accel_dev)) { in adf_dev_init()
106 dev_err(&GET_DEV(accel_dev), in adf_dev_init()
110 set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status); in adf_dev_init()
112 if (adf_ae_fw_load(accel_dev)) { in adf_dev_init()
113 dev_err(&GET_DEV(accel_dev), in adf_dev_init()
117 set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status); in adf_dev_init()
119 if (hw_data->alloc_irq(accel_dev)) { in adf_dev_init()
120 dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n"); in adf_dev_init()
123 set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); in adf_dev_init()
126 hw_data->ras_ops.enable_ras_errors(accel_dev); in adf_dev_init()
128 hw_data->enable_ints(accel_dev); in adf_dev_init()
129 hw_data->enable_error_correction(accel_dev); in adf_dev_init()
131 ret = hw_data->pfvf_ops.enable_comms(accel_dev); in adf_dev_init()
135 if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) && in adf_dev_init()
136 accel_dev->is_vf) { in adf_dev_init()
137 if (qat_crypto_vf_dev_config(accel_dev)) in adf_dev_init()
141 adf_heartbeat_init(accel_dev); in adf_dev_init()
142 ret = adf_rl_init(accel_dev); in adf_dev_init()
146 ret = adf_tl_init(accel_dev); in adf_dev_init()
156 if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { in adf_dev_init()
157 dev_err(&GET_DEV(accel_dev), in adf_dev_init()
162 set_bit(accel_dev->accel_id, service->init_status); in adf_dev_init()
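The adf_dev_init() references above show two idioms that recur through this listing: an optional hw_data callback is invoked only when the hardware generation provides it (for example `hw_data->init_device && hw_data->init_device(accel_dev)`), and a status bit (ADF_STATUS_AE_INITIALISED, ADF_STATUS_AE_UCODE_LOADED, ADF_STATUS_IRQ_ALLOCATED) is set as each stage completes so the stop/shutdown paths know exactly what to undo. A minimal standalone sketch of that pattern, using invented names rather than the real adf_* types:

```c
#include <stdio.h>

/* Invented stand-ins for the hw_data callbacks and ADF_STATUS_* bits. */
struct hw_ops {
	int (*init_device)(void *dev);      /* optional: may be NULL */
	int (*init_admin_comms)(void *dev); /* optional: may be NULL */
};

enum {
	STATUS_DEVICE_INITIALISED = 1u << 0,
	STATUS_ADMIN_UP           = 1u << 1,
};

static int dev_init(void *dev, const struct hw_ops *ops, unsigned int *status)
{
	/* Call an op only when this hardware generation implements it. */
	if (ops->init_device && ops->init_device(dev))
		return -1;
	*status |= STATUS_DEVICE_INITIALISED;

	if (ops->init_admin_comms && ops->init_admin_comms(dev))
		return -1;
	*status |= STATUS_ADMIN_UP;

	return 0;
}

static int fake_init_device(void *dev) { (void)dev; return 0; }

int main(void)
{
	const struct hw_ops ops = { .init_device = fake_init_device };
	unsigned int status = 0;

	printf("init=%d status=%#x\n", dev_init(NULL, &ops, &status), status);
	return 0;
}
```

The bit-per-stage bookkeeping is what lets the teardown code unwind a partially initialised device safely.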
178 static int adf_dev_start(struct adf_accel_dev *accel_dev) in adf_dev_start() argument
180 struct adf_hw_device_data *hw_data = accel_dev->hw_device; in adf_dev_start()
184 set_bit(ADF_STATUS_STARTING, &accel_dev->status); in adf_dev_start()
186 if (adf_ae_start(accel_dev)) { in adf_dev_start()
187 dev_err(&GET_DEV(accel_dev), "AE Start Failed\n"); in adf_dev_start()
190 set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); in adf_dev_start()
192 if (hw_data->send_admin_init(accel_dev)) { in adf_dev_start()
193 dev_err(&GET_DEV(accel_dev), "Failed to send init message\n"); in adf_dev_start()
198 ret = hw_data->measure_clock(accel_dev); in adf_dev_start()
200 dev_err(&GET_DEV(accel_dev), "Failed measure device clock\n"); in adf_dev_start()
207 hw_data->set_ssm_wdtimer(accel_dev); in adf_dev_start()
210 if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) { in adf_dev_start()
211 dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n"); in adf_dev_start()
216 ret = hw_data->start_timer(accel_dev); in adf_dev_start()
218 dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n"); in adf_dev_start()
223 adf_heartbeat_start(accel_dev); in adf_dev_start()
224 ret = adf_rl_start(accel_dev); in adf_dev_start()
228 ret = adf_tl_start(accel_dev); in adf_dev_start()
233 if (service->event_hld(accel_dev, ADF_EVENT_START)) { in adf_dev_start()
234 dev_err(&GET_DEV(accel_dev), in adf_dev_start()
239 set_bit(accel_dev->accel_id, service->start_status); in adf_dev_start()
242 clear_bit(ADF_STATUS_STARTING, &accel_dev->status); in adf_dev_start()
243 set_bit(ADF_STATUS_STARTED, &accel_dev->status); in adf_dev_start()
245 if (!list_empty(&accel_dev->crypto_list) && in adf_dev_start()
247 dev_err(&GET_DEV(accel_dev), in adf_dev_start()
249 set_bit(ADF_STATUS_STARTING, &accel_dev->status); in adf_dev_start()
250 clear_bit(ADF_STATUS_STARTED, &accel_dev->status); in adf_dev_start()
253 set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status); in adf_dev_start()
255 if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) { in adf_dev_start()
256 dev_err(&GET_DEV(accel_dev), in adf_dev_start()
258 set_bit(ADF_STATUS_STARTING, &accel_dev->status); in adf_dev_start()
259 clear_bit(ADF_STATUS_STARTED, &accel_dev->status); in adf_dev_start()
262 set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status); in adf_dev_start()
264 adf_dbgfs_add(accel_dev); in adf_dev_start()
265 adf_sysfs_start_ras(accel_dev); in adf_dev_start()
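adf_dev_start() adds two pieces of bookkeeping once the hardware is running: every registered service receives ADF_EVENT_START and, on success, the device id is set in that service's start_status bitmap (the set_bit(accel_dev->accel_id, service->start_status) line above), while crypto and compression algorithms are registered only when the corresponding instance list is non-empty, clearing ADF_STATUS_STARTED again if registration fails. A hedged sketch of the per-service part, with placeholder types rather than the QAT service table:

```c
#include <stdio.h>

enum event { EVENT_START, EVENT_STOP };

/* Placeholder for the registered-service table walked by the real code. */
struct service {
	const char *name;
	int (*event_hld)(int accel_id, enum event ev);
	unsigned long start_status;	/* one bit per device id that started */
};

static int demo_hld(int accel_id, enum event ev)
{
	printf("accel %d: service event %d\n", accel_id, ev);
	return 0;
}

int main(void)
{
	struct service services[] = {
		{ .name = "crypto", .event_hld = demo_hld },
		{ .name = "comp",   .event_hld = demo_hld },
	};
	int accel_id = 0;

	for (size_t i = 0; i < sizeof(services) / sizeof(services[0]); i++) {
		if (services[i].event_hld(accel_id, EVENT_START)) {
			fprintf(stderr, "failed to start service %s\n",
				services[i].name);
			return 1;
		}
		/* Only services marked here are sent a STOP on the way down. */
		services[i].start_status |= 1ul << accel_id;
	}
	return 0;
}
```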
280 static void adf_dev_stop(struct adf_accel_dev *accel_dev) in adf_dev_stop() argument
282 struct adf_hw_device_data *hw_data = accel_dev->hw_device; in adf_dev_stop()
287 if (!adf_dev_started(accel_dev) && in adf_dev_stop()
288 !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) in adf_dev_stop()
291 adf_tl_stop(accel_dev); in adf_dev_stop()
292 adf_rl_stop(accel_dev); in adf_dev_stop()
293 adf_dbgfs_rm(accel_dev); in adf_dev_stop()
294 adf_sysfs_stop_ras(accel_dev); in adf_dev_stop()
296 clear_bit(ADF_STATUS_STARTING, &accel_dev->status); in adf_dev_stop()
297 clear_bit(ADF_STATUS_STARTED, &accel_dev->status); in adf_dev_stop()
299 if (!list_empty(&accel_dev->crypto_list) && in adf_dev_stop()
300 test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) { in adf_dev_stop()
304 clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status); in adf_dev_stop()
306 if (!list_empty(&accel_dev->compression_list) && in adf_dev_stop()
307 test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status)) in adf_dev_stop()
309 clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status); in adf_dev_stop()
312 if (!test_bit(accel_dev->accel_id, service->start_status)) in adf_dev_stop()
314 ret = service->event_hld(accel_dev, ADF_EVENT_STOP); in adf_dev_stop()
316 clear_bit(accel_dev->accel_id, service->start_status); in adf_dev_stop()
319 clear_bit(accel_dev->accel_id, service->start_status); in adf_dev_stop()
324 hw_data->stop_timer(accel_dev); in adf_dev_stop()
326 hw_data->disable_iov(accel_dev); in adf_dev_stop()
331 if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) { in adf_dev_stop()
332 if (adf_ae_stop(accel_dev)) in adf_dev_stop()
333 dev_err(&GET_DEV(accel_dev), "failed to stop AE\n"); in adf_dev_stop()
335 clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status); in adf_dev_stop()
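The adf_dev_stop() lines are the mirror image: the function returns early unless the device is started or starting, algorithms are unregistered only when both the instance list is non-empty and the matching *_ALGS_REGISTERED bit is set, and only services whose start_status bit was set during start receive ADF_EVENT_STOP. A small standalone sketch of that conditional-teardown shape (invented flag names, not the ADF_STATUS_* constants):

```c
#include <stdbool.h>
#include <stdio.h>

enum {
	STATUS_STARTING        = 1u << 0,
	STATUS_STARTED         = 1u << 1,
	STATUS_ALGS_REGISTERED = 1u << 2,
};

static void unregister_algs(void) { puts("unregister algs"); }

static void dev_stop(unsigned int *status, bool have_instances)
{
	/* Nothing to do if the device never (even partially) came up. */
	if (!(*status & (STATUS_STARTED | STATUS_STARTING)))
		return;

	*status &= ~(STATUS_STARTING | STATUS_STARTED);

	/* Undo registration only if it actually happened on the way up. */
	if (have_instances && (*status & STATUS_ALGS_REGISTERED)) {
		unregister_algs();
		*status &= ~STATUS_ALGS_REGISTERED;
	}
}

int main(void)
{
	unsigned int status = STATUS_STARTED | STATUS_ALGS_REGISTERED;

	dev_stop(&status, true);
	printf("status=%#x\n", status);
	return 0;
}
```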
346 static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) in adf_dev_shutdown() argument
348 struct adf_hw_device_data *hw_data = accel_dev->hw_device; in adf_dev_shutdown()
352 dev_err(&GET_DEV(accel_dev), in adf_dev_shutdown()
357 if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) { in adf_dev_shutdown()
358 adf_ae_fw_release(accel_dev); in adf_dev_shutdown()
359 clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status); in adf_dev_shutdown()
362 if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) { in adf_dev_shutdown()
363 if (adf_ae_shutdown(accel_dev)) in adf_dev_shutdown()
364 dev_err(&GET_DEV(accel_dev), in adf_dev_shutdown()
368 &accel_dev->status); in adf_dev_shutdown()
372 if (!test_bit(accel_dev->accel_id, service->init_status)) in adf_dev_shutdown()
374 if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) in adf_dev_shutdown()
375 dev_err(&GET_DEV(accel_dev), in adf_dev_shutdown()
379 clear_bit(accel_dev->accel_id, service->init_status); in adf_dev_shutdown()
382 adf_rl_exit(accel_dev); in adf_dev_shutdown()
385 hw_data->ras_ops.disable_ras_errors(accel_dev); in adf_dev_shutdown()
387 adf_heartbeat_shutdown(accel_dev); in adf_dev_shutdown()
389 adf_tl_shutdown(accel_dev); in adf_dev_shutdown()
391 if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { in adf_dev_shutdown()
392 hw_data->free_irq(accel_dev); in adf_dev_shutdown()
393 clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); in adf_dev_shutdown()
397 if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) in adf_dev_shutdown()
398 adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC); in adf_dev_shutdown()
401 hw_data->exit_arb(accel_dev); in adf_dev_shutdown()
404 hw_data->exit_admin_comms(accel_dev); in adf_dev_shutdown()
406 adf_cleanup_etr_data(accel_dev); in adf_dev_shutdown()
408 adf_dev_restore(accel_dev); in adf_dev_shutdown()
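adf_dev_shutdown() then releases what adf_dev_init() acquired, in roughly reverse order, with each release gated on the status bit set on the way up (firmware is released only if ADF_STATUS_AE_UCODE_LOADED is set, IRQs are freed only if ADF_STATUS_IRQ_ALLOCATED is set, and so on). Note also the adf_cfg_del_all_except() call above: the device configuration is dropped only when ADF_STATUS_RESTARTING is not set, which is what allows a restart to come back up without reconfiguring. A hedged sketch of the gated, reverse-order teardown, again with placeholder names:

```c
#include <stdio.h>

enum {
	STATUS_FW_LOADED     = 1u << 0,
	STATUS_IRQ_ALLOCATED = 1u << 1,
	STATUS_RESTARTING    = 1u << 2,
};

static void fw_release(void) { puts("release firmware"); }
static void free_irq(void)   { puts("free interrupts"); }
static void cfg_delete(void) { puts("delete device configuration"); }

static void dev_shutdown(unsigned int *status)
{
	/* Each resource is released only if the matching init stage ran. */
	if (*status & STATUS_FW_LOADED) {
		fw_release();
		*status &= ~STATUS_FW_LOADED;
	}
	if (*status & STATUS_IRQ_ALLOCATED) {
		free_irq();
		*status &= ~STATUS_IRQ_ALLOCATED;
	}

	/* Keep the configuration around when we are only restarting. */
	if (!(*status & STATUS_RESTARTING))
		cfg_delete();
}

int main(void)
{
	unsigned int status = STATUS_FW_LOADED | STATUS_IRQ_ALLOCATED |
			      STATUS_RESTARTING;

	dev_shutdown(&status);
	printf("status=%#x\n", status);
	return 0;
}
```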
411 int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) in adf_dev_restarting_notify() argument
416 if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) in adf_dev_restarting_notify()
417 dev_err(&GET_DEV(accel_dev), in adf_dev_restarting_notify()
424 int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) in adf_dev_restarted_notify() argument
429 if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) in adf_dev_restarted_notify()
430 dev_err(&GET_DEV(accel_dev), in adf_dev_restarted_notify()
437 void adf_error_notifier(struct adf_accel_dev *accel_dev) in adf_error_notifier() argument
442 if (service->event_hld(accel_dev, ADF_EVENT_FATAL_ERROR)) in adf_error_notifier()
443 dev_err(&GET_DEV(accel_dev), in adf_error_notifier()
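The three notifier helpers above (adf_dev_restarting_notify(), adf_dev_restarted_notify(), adf_error_notifier()) share one broadcast shape: walk the registered services, hand each one the event (ADF_EVENT_RESTARTING, ADF_EVENT_RESTARTED or ADF_EVENT_FATAL_ERROR), and log a per-service failure without aborting the walk. A compact sketch of that broadcast, with a plain array standing in for the kernel's service list:

```c
#include <stdio.h>

enum event { EVENT_RESTARTING, EVENT_RESTARTED, EVENT_FATAL_ERROR };

struct service {
	const char *name;
	int (*event_hld)(int accel_id, enum event ev);
};

static int demo_hld(int accel_id, enum event ev)
{
	printf("accel %d: service event %d\n", accel_id, ev);
	return 0;
}

/* Broadcast one event to every service; a failure is logged, not fatal. */
static void notify_all(struct service *svcs, size_t n, int accel_id,
		       enum event ev)
{
	for (size_t i = 0; i < n; i++)
		if (svcs[i].event_hld(accel_id, ev))
			fprintf(stderr, "failed to send event %d to %s\n",
				ev, svcs[i].name);
}

int main(void)
{
	struct service svcs[] = { { "crypto", demo_hld }, { "comp", demo_hld } };

	notify_all(svcs, 2, 0, EVENT_RESTARTING);
	notify_all(svcs, 2, 0, EVENT_RESTARTED);
	return 0;
}
```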
449 int adf_dev_down(struct adf_accel_dev *accel_dev) in adf_dev_down() argument
453 if (!accel_dev) in adf_dev_down()
456 mutex_lock(&accel_dev->state_lock); in adf_dev_down()
458 adf_dev_stop(accel_dev); in adf_dev_down()
459 adf_dev_shutdown(accel_dev); in adf_dev_down()
461 mutex_unlock(&accel_dev->state_lock); in adf_dev_down()
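adf_dev_down() itself is a thin wrapper: it checks the accel_dev pointer, takes accel_dev->state_lock, and runs adf_dev_stop() followed by adf_dev_shutdown() under that mutex so concurrent up/down requests are serialized. A userspace sketch of the same locking shape, with a pthread mutex standing in for the kernel mutex:

```c
#include <pthread.h>
#include <stdio.h>

struct device {
	pthread_mutex_t state_lock;
	int started;
};

static void dev_stop(struct device *d)     { d->started = 0; puts("stop"); }
static void dev_shutdown(struct device *d) { (void)d; puts("shutdown"); }

static int dev_down(struct device *d)
{
	if (!d)
		return -1;

	/* Serialize against a concurrent dev_up()/dev_down(). */
	pthread_mutex_lock(&d->state_lock);
	dev_stop(d);
	dev_shutdown(d);
	pthread_mutex_unlock(&d->state_lock);
	return 0;
}

int main(void)
{
	struct device d = {
		.state_lock = PTHREAD_MUTEX_INITIALIZER,
		.started = 1,
	};

	return dev_down(&d) ? 1 : 0;
}
```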
466 int adf_dev_up(struct adf_accel_dev *accel_dev, bool config) in adf_dev_up() argument
470 if (!accel_dev) in adf_dev_up()
473 mutex_lock(&accel_dev->state_lock); in adf_dev_up()
475 if (adf_dev_started(accel_dev)) { in adf_dev_up()
476 dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n", in adf_dev_up()
477 accel_dev->accel_id); in adf_dev_up()
482 if (config && GET_HW_DATA(accel_dev)->dev_config) { in adf_dev_up()
483 ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev); in adf_dev_up()
488 ret = adf_dev_init(accel_dev); in adf_dev_up()
492 ret = adf_dev_start(accel_dev); in adf_dev_up()
495 mutex_unlock(&accel_dev->state_lock); in adf_dev_up()
500 int adf_dev_restart(struct adf_accel_dev *accel_dev) in adf_dev_restart() argument
504 if (!accel_dev) in adf_dev_restart()
507 adf_dev_down(accel_dev); in adf_dev_restart()
509 ret = adf_dev_up(accel_dev, false); in adf_dev_restart()
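adf_dev_up() takes the same state_lock, returns early with an informational message when the device is already started, optionally runs the hardware-specific dev_config() callback when config is true, and then calls adf_dev_init() and adf_dev_start() in that order. adf_dev_restart() composes the two wrappers: adf_dev_down() followed by adf_dev_up(accel_dev, false), presumably passing false because the shutdown path above keeps the configuration while ADF_STATUS_RESTARTING is set. A sketch of that ordering with invented helper names (locking omitted here; see the previous sketch):

```c
#include <stdbool.h>
#include <stdio.h>

struct device {
	int id;
	bool started;
	int (*dev_config)(struct device *d);	/* optional hw-specific config */
};

static int dev_init(struct device *d)  { (void)d; puts("init"); return 0; }
static int dev_start(struct device *d) { d->started = true; puts("start"); return 0; }
static void dev_down(struct device *d) { d->started = false; puts("down"); }

static int dev_up(struct device *d, bool config)
{
	int ret;

	if (d->started) {
		printf("device %d already up\n", d->id);
		return 0;
	}

	/* Configuration is only (re)generated when the caller asks for it. */
	if (config && d->dev_config) {
		ret = d->dev_config(d);
		if (ret)
			return ret;
	}

	ret = dev_init(d);
	if (ret)
		return ret;

	return dev_start(d);
}

/* Restart reuses the existing configuration: down, then up without config. */
static int dev_restart(struct device *d)
{
	dev_down(d);
	return dev_up(d, false);
}

int main(void)
{
	struct device d = { .id = 0 };

	dev_up(&d, true);
	return dev_restart(&d) ? 1 : 0;
}
```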