Lines matching refs: accel_dev (Intel QAT rate-limiting code, adf_rl.c)
Each entry gives the source line number, the matching line, and the enclosing function; "argument" marks lines where accel_dev appears in a parameter list.

34 static int validate_user_input(struct adf_accel_dev *accel_dev,  in validate_user_input()  argument
43 dev_notice(&GET_DEV(accel_dev), in validate_user_input()
53 dev_notice(&GET_DEV(accel_dev), in validate_user_input()
60 dev_notice(&GET_DEV(accel_dev), in validate_user_input()
66 dev_notice(&GET_DEV(accel_dev), in validate_user_input()
73 dev_notice(&GET_DEV(accel_dev), in validate_user_input()
82 static int validate_sla_id(struct adf_accel_dev *accel_dev, int sla_id) in validate_sla_id() argument
87 dev_notice(&GET_DEV(accel_dev), "Provided ID is out of bounds\n"); in validate_sla_id()
91 sla = accel_dev->rate_limiting->sla[sla_id]; in validate_sla_id()
94 dev_notice(&GET_DEV(accel_dev), "SLA with provided ID does not exist\n"); in validate_sla_id()
99 dev_notice(&GET_DEV(accel_dev), "This ID is reserved for internal use\n"); in validate_sla_id()
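The three notices above trace the whole ID check: bounds, existence, then a reserved-node guard. A minimal reconstruction around those matched lines; the bounds constant (RL_NODES_CNT_MAX) and the leaf-only policy are assumptions, while the notices are verbatim from the matches:

static int validate_sla_id(struct adf_accel_dev *accel_dev, int sla_id)
{
	struct rl_sla *sla;

	/* Reject IDs outside the SLA table. */
	if (sla_id < 0 || sla_id >= RL_NODES_CNT_MAX) {
		dev_notice(&GET_DEV(accel_dev), "Provided ID is out of bounds\n");
		return -EINVAL;
	}

	sla = accel_dev->rate_limiting->sla[sla_id];

	/* The slot must already hold an SLA. */
	if (!sla) {
		dev_notice(&GET_DEV(accel_dev), "SLA with provided ID does not exist\n");
		return -EINVAL;
	}

	/* Default root/cluster nodes are not addressable by users. */
	if (sla->type != RL_LEAF) {
		dev_notice(&GET_DEV(accel_dev), "This ID is reserved for internal use\n");
		return -EINVAL;
	}

	return 0;
}
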
214 static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla, in prepare_rp_ids() argument
218 u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf; in prepare_rp_ids()
219 bool *rp_in_use = accel_dev->rate_limiting->rp_in_use; in prepare_rp_ids()
221 u16 rp_id_max = GET_HW_DATA(accel_dev)->num_banks; in prepare_rp_ids()
227 dev_notice(&GET_DEV(accel_dev), in prepare_rp_ids()
233 dev_notice(&GET_DEV(accel_dev), in prepare_rp_ids()
238 if (GET_SRV_TYPE(accel_dev, rp_id % rps_per_bundle) != arb_srv) { in prepare_rp_ids()
239 dev_notice(&GET_DEV(accel_dev), in prepare_rp_ids()
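Together these matches outline the ring-pair scan: each bit set in the caller's rp_mask names a bank, which must exist (num_banks), be free (rp_in_use), and be arbitrated to the SLA's service. A hedged sketch: srv_to_cfg_svc_type() is a hypothetical stand-in for the service-mapping helper, the ring_pairs_ids/ring_pairs_cnt fields follow the usual struct rl_sla layout, and the wording of the first two notices (matches at 227 and 233) is assumed:

static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla,
			  const unsigned long rp_mask)
{
	enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(sla->srv); /* hypothetical helper */
	u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf;
	bool *rp_in_use = accel_dev->rate_limiting->rp_in_use;
	u16 rp_id_max = GET_HW_DATA(accel_dev)->num_banks;
	u16 cnt = 0;
	u16 rp_id;

	for_each_set_bit(rp_id, &rp_mask, rp_id_max) {
		/* A ring pair may back at most one SLA. */
		if (rp_in_use[rp_id]) {
			dev_notice(&GET_DEV(accel_dev),
				   "RP #%u already in use\n", rp_id);
			return -EINVAL;
		}

		/* The bank's arbitrated service must match the SLA's. */
		if (GET_SRV_TYPE(accel_dev, rp_id % rps_per_bundle) != arb_srv) {
			dev_notice(&GET_DEV(accel_dev),
				   "RP #%u does not serve this service\n", rp_id);
			return -EINVAL;
		}

		sla->ring_pairs_ids[cnt++] = rp_id;
	}

	sla->ring_pairs_cnt = cnt;

	return 0;
}
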
263 static void assign_rps_to_leaf(struct adf_accel_dev *accel_dev, in assign_rps_to_leaf() argument
266 struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); in assign_rps_to_leaf()
267 void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); in assign_rps_to_leaf()
279 static void assign_leaf_to_cluster(struct adf_accel_dev *accel_dev, in assign_leaf_to_cluster() argument
282 struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); in assign_leaf_to_cluster()
283 void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); in assign_leaf_to_cluster()
293 static void assign_cluster_to_root(struct adf_accel_dev *accel_dev, in assign_cluster_to_root() argument
296 struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); in assign_cluster_to_root()
297 void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); in assign_cluster_to_root()
307 static void assign_node_to_parent(struct adf_accel_dev *accel_dev, in assign_node_to_parent() argument
312 assign_rps_to_leaf(accel_dev, sla, clear_assignment); in assign_node_to_parent()
313 assign_leaf_to_cluster(accel_dev, sla, clear_assignment); in assign_node_to_parent()
316 assign_cluster_to_root(accel_dev, sla, clear_assignment); in assign_node_to_parent()
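The matches at 312-316 imply a dispatch on node type: a leaf first binds its ring pairs, then attaches to its cluster, while a cluster attaches to the root. A sketch under that assumption (the case labels are inferred; the three calls are verbatim from the matches):

static void assign_node_to_parent(struct adf_accel_dev *accel_dev,
				  struct rl_sla *sla, bool clear_assignment)
{
	switch (sla->type) {
	case RL_LEAF:
		assign_rps_to_leaf(accel_dev, sla, clear_assignment);
		assign_leaf_to_cluster(accel_dev, sla, clear_assignment);
		break;
	case RL_CLUSTER:
		assign_cluster_to_root(accel_dev, sla, clear_assignment);
		break;
	default:
		break;
	}
}
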
482 struct adf_hw_device_data *hw_device = GET_HW_DATA(rl_data->accel_dev); in get_next_free_node_id()
522 u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val, in adf_rl_calculate_slice_tokens() argument
525 struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; in adf_rl_calculate_slice_tokens()
526 struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); in adf_rl_calculate_slice_tokens()
534 avail_slice_cycles *= hw_data->get_svc_slice_cnt(accel_dev, svc_type); in adf_rl_calculate_slice_tokens()
543 static u32 adf_rl_get_num_svc_aes(struct adf_accel_dev *accel_dev, in adf_rl_get_num_svc_aes() argument
546 struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; in adf_rl_get_num_svc_aes()
554 u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val, in adf_rl_calculate_ae_cycles() argument
557 struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; in adf_rl_calculate_ae_cycles()
558 struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); in adf_rl_calculate_ae_cycles()
565 avail_ae_cycles *= adf_rl_get_num_svc_aes(accel_dev, svc_type); in adf_rl_calculate_ae_cycles()
577 u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val, in adf_rl_calculate_pci_bw() argument
580 struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; in adf_rl_calculate_pci_bw()
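Lines 522-580 turn one user-facing SLA value into three hardware budgets: slice tokens, AE cycles, and PCIe bandwidth. The matched multiplies at 534 and 565 show the shared pattern: scale a cycle budget by the per-service resource count, then take the SLA's fraction. A hypothetical helper isolating that pattern (rl_share_of_budget() is not in the driver; the scale_ref semantics are an assumption):

#include <linux/math64.h>

static u64 rl_share_of_budget(u64 cycles_per_interval, u32 resource_cnt,
			      u32 sla_val, u32 scale_ref)
{
	/* Total budget: e.g. cycles * hw_data->get_svc_slice_cnt() for
	 * slice tokens (line 534), or cycles * adf_rl_get_num_svc_aes()
	 * for AE cycles (line 565). */
	u64 budget = cycles_per_interval * resource_cnt;

	/* sla_val is expressed against scale_ref, so the SLA's share is
	 * budget * sla_val / scale_ref (order of operations assumed). */
	return div_u64(budget * sla_val, scale_ref);
}
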
617 static int add_new_sla_entry(struct adf_accel_dev *accel_dev, in add_new_sla_entry() argument
621 struct adf_rl *rl_data = accel_dev->rate_limiting; in add_new_sla_entry()
632 if (!adf_is_service_enabled(accel_dev, sla_in->srv)) { in add_new_sla_entry()
633 dev_notice(&GET_DEV(accel_dev), in add_new_sla_entry()
643 dev_notice(&GET_DEV(accel_dev), in add_new_sla_entry()
651 dev_notice(&GET_DEV(accel_dev), in add_new_sla_entry()
660 dev_notice(&GET_DEV(accel_dev), in add_new_sla_entry()
663 dev_notice(&GET_DEV(accel_dev), in add_new_sla_entry()
670 ret = prepare_rp_ids(accel_dev, sla, sla_in->rp_mask); in add_new_sla_entry()
672 dev_notice(&GET_DEV(accel_dev), in add_new_sla_entry()
690 static int initialize_default_nodes(struct adf_accel_dev *accel_dev) in initialize_default_nodes() argument
692 struct adf_rl *rl_data = accel_dev->rate_limiting; in initialize_default_nodes()
703 if (!adf_is_service_enabled(accel_dev, i)) in initialize_default_nodes()
710 ret = adf_rl_add_sla(accel_dev, &sla_in); in initialize_default_nodes()
724 ret = adf_rl_add_sla(accel_dev, &sla_in); in initialize_default_nodes()
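From the single service check at 703 and the two add calls at 710 and 724, the default tree appears to be built in one pass over services, issuing two adf_rl_add_sla() calls per enabled service. A sketch under that reading; the loop bound, node types, and cir/pir defaults are assumptions:

static int initialize_default_nodes(struct adf_accel_dev *accel_dev)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct adf_rl_sla_input_data sla_in = { };
	int ret;
	int i;

	for (i = 0; i < SVC_BASE_COUNT; i++) {		/* bound assumed */
		if (!adf_is_service_enabled(accel_dev, i))
			continue;

		/* Default top-level node at full device capacity... */
		sla_in.type = RL_ROOT;
		sla_in.srv = i;
		sla_in.cir = rl_data->device_data->scale_ref;
		sla_in.pir = sla_in.cir;
		ret = adf_rl_add_sla(accel_dev, &sla_in);
		if (ret)
			return ret;

		/* ...then a default cluster beneath it. */
		sla_in.type = RL_CLUSTER;
		ret = adf_rl_add_sla(accel_dev, &sla_in);
		if (ret)
			return ret;
	}

	return 0;
}
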
750 assign_node_to_parent(rl_data->accel_dev, sla, true); in clear_sla()
751 adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type); in clear_sla()
759 static void free_all_sla(struct adf_accel_dev *accel_dev) in free_all_sla() argument
761 struct adf_rl *rl_data = accel_dev->rate_limiting; in free_all_sla()
789 static int add_update_sla(struct adf_accel_dev *accel_dev, in add_update_sla() argument
792 struct adf_rl *rl_data = accel_dev->rate_limiting; in add_update_sla()
799 dev_warn(&GET_DEV(accel_dev), in add_update_sla()
807 ret = validate_user_input(accel_dev, sla_in, is_update); in add_update_sla()
812 ret = validate_sla_id(accel_dev, sla_in->sla_id); in add_update_sla()
819 ret = add_new_sla_entry(accel_dev, sla_in, &sla); in add_update_sla()
825 dev_notice(&GET_DEV(accel_dev), in add_update_sla()
835 assign_node_to_parent(accel_dev, sla, false); in add_update_sla()
836 ret = adf_rl_send_admin_add_update_msg(accel_dev, sla, is_update); in add_update_sla()
838 dev_notice(&GET_DEV(accel_dev), in add_update_sla()
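The matches at 789-838 lay out the whole add/update pipeline: guard against an uninitialized device (warn at 799), validate the input (807), for updates also the existing ID (812), create the table entry (819), wire the node into the hierarchy (835), then tell the firmware (836). A condensed control-flow sketch; the lock name, error strings, and return codes are assumptions:

static int add_update_sla(struct adf_accel_dev *accel_dev,
			  struct adf_rl_sla_input_data *sla_in, bool is_update)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct rl_sla *sla = NULL;
	int ret;

	if (!rl_data) {
		dev_warn(&GET_DEV(accel_dev),
			 "rate limiting not initialized\n");
		return -ENODEV;
	}

	mutex_lock(&rl_data->rl_lock);

	ret = validate_user_input(accel_dev, sla_in, is_update);
	if (ret)
		goto ret_err;

	if (is_update) {
		ret = validate_sla_id(accel_dev, sla_in->sla_id);
		if (ret)
			goto ret_err;
	}

	ret = add_new_sla_entry(accel_dev, sla_in, &sla);
	if (ret)
		goto ret_err;

	/* The notice at 825 sits between entry creation and assignment,
	 * suggesting the budget conversion can reject the request here. */

	assign_node_to_parent(accel_dev, sla, false);
	ret = adf_rl_send_admin_add_update_msg(accel_dev, sla, is_update);
	/* On failure (notice at 838) the assignment is presumably undone. */

ret_err:
	mutex_unlock(&rl_data->rl_lock);
	return ret;
}
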
875 int adf_rl_add_sla(struct adf_accel_dev *accel_dev, in adf_rl_add_sla() argument
878 return add_update_sla(accel_dev, sla_in, false); in adf_rl_add_sla()
890 int adf_rl_update_sla(struct adf_accel_dev *accel_dev, in adf_rl_update_sla() argument
893 return add_update_sla(accel_dev, sla_in, true); in adf_rl_update_sla()
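Both wrappers take the same input block, so a caller's sequence looks like the sketch below. The rp_mask, sla_id, and srv fields appear in the matches; type, cir, pir, the SVC_SYM value, and add-SLA writing the new ID back into sla_in.sla_id are assumptions:

/* Hypothetical caller: create a leaf SLA over ring pairs 0 and 1,
 * then raise its peak rate. */
static int example_configure_sla(struct adf_accel_dev *accel_dev)
{
	struct adf_rl_sla_input_data sla_in = { };
	int ret;

	sla_in.type = RL_LEAF;
	sla_in.srv = SVC_SYM;			/* enum value assumed */
	sla_in.rp_mask = BIT(0) | BIT(1);
	sla_in.cir = 100;			/* committed rate, device units */
	sla_in.pir = 200;			/* peak rate, device units */

	ret = adf_rl_add_sla(accel_dev, &sla_in);
	if (ret)
		return ret;

	/* Assumed: add filled in sla_in.sla_id for the new node. */
	sla_in.pir = 400;
	return adf_rl_update_sla(accel_dev, &sla_in);
}
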
907 int adf_rl_get_sla(struct adf_accel_dev *accel_dev, in adf_rl_get_sla() argument
913 ret = validate_sla_id(accel_dev, sla_in->sla_id); in adf_rl_get_sla()
917 sla = accel_dev->rate_limiting->sla[sla_in->sla_id]; in adf_rl_get_sla()
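The getter mirrors the setter's input block: resolve and validate the ID, then copy the stored node back out. A sketch; which fields are copied back is an assumption based on the struct's role:

int adf_rl_get_sla(struct adf_accel_dev *accel_dev,
		   struct adf_rl_sla_input_data *sla_in)
{
	struct rl_sla *sla;
	int ret;

	ret = validate_sla_id(accel_dev, sla_in->sla_id);
	if (ret)
		return ret;

	sla = accel_dev->rate_limiting->sla[sla_in->sla_id];

	/* Copy-back of the node's parameters (field list assumed). */
	sla_in->type = sla->type;
	sla_in->srv = sla->srv;
	sla_in->cir = sla->cir;
	sla_in->pir = sla->pir;

	return 0;
}
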
951 int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev, in adf_rl_get_capability_remaining() argument
954 struct adf_rl *rl_data = accel_dev->rate_limiting; in adf_rl_get_capability_remaining()
961 if (sla_id > RL_SLA_EMPTY_ID && !validate_sla_id(accel_dev, sla_id)) { in adf_rl_get_capability_remaining()
992 int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id) in adf_rl_remove_sla() argument
994 struct adf_rl *rl_data = accel_dev->rate_limiting; in adf_rl_remove_sla()
999 ret = validate_sla_id(accel_dev, sla_id); in adf_rl_remove_sla()
1006 dev_notice(&GET_DEV(accel_dev), in adf_rl_remove_sla()
1024 void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default) in adf_rl_remove_sla_all() argument
1026 struct adf_rl *rl_data = accel_dev->rate_limiting; in adf_rl_remove_sla_all()
1049 int adf_rl_init(struct adf_accel_dev *accel_dev) in adf_rl_init() argument
1051 struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); in adf_rl_init()
1075 rl->device_data = &accel_dev->hw_device->rl_data; in adf_rl_init()
1076 rl->accel_dev = accel_dev; in adf_rl_init()
1078 accel_dev->rate_limiting = rl; in adf_rl_init()
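Init is little more than allocation plus wiring: the new context points back at the device and at the HW-specific RL parameters, and the device points at the context. A minimal sketch; the real function also declares hw_data (line 1051), presumably to validate those parameters first, and the mutex name is assumed:

int adf_rl_init(struct adf_accel_dev *accel_dev)
{
	struct adf_rl *rl;

	rl = kzalloc(sizeof(*rl), GFP_KERNEL);
	if (!rl)
		return -ENOMEM;

	/* Back-pointers used throughout the file, e.g. by clear_sla()
	 * and get_next_free_node_id(). */
	rl->device_data = &accel_dev->hw_device->rl_data;
	rl->accel_dev = accel_dev;
	mutex_init(&rl->rl_lock);		/* lock name assumed */

	accel_dev->rate_limiting = rl;

	return 0;
}
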
1084 int adf_rl_start(struct adf_accel_dev *accel_dev) in adf_rl_start() argument
1086 struct adf_rl_hw_data *rl_hw_data = &GET_HW_DATA(accel_dev)->rl_data; in adf_rl_start()
1087 void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); in adf_rl_start()
1088 u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities; in adf_rl_start()
1091 if (!accel_dev->rate_limiting) { in adf_rl_start()
1097 dev_info(&GET_DEV(accel_dev), "feature not supported by FW\n"); in adf_rl_start()
1107 ret = adf_rl_send_admin_init_msg(accel_dev, &rl_hw_data->slices); in adf_rl_start()
1109 dev_err(&GET_DEV(accel_dev), "initialization failed\n"); in adf_rl_start()
1113 ret = initialize_default_nodes(accel_dev); in adf_rl_start()
1115 dev_err(&GET_DEV(accel_dev), in adf_rl_start()
1120 ret = adf_sysfs_rl_add(accel_dev); in adf_rl_start()
1122 dev_err(&GET_DEV(accel_dev), "failed to add sysfs interface\n"); in adf_rl_start()
1129 adf_sysfs_rl_rm(accel_dev); in adf_rl_start()
1131 adf_rl_remove_sla_all(accel_dev, true); in adf_rl_start()
1133 kfree(accel_dev->rate_limiting); in adf_rl_start()
1134 accel_dev->rate_limiting = NULL; in adf_rl_start()
1139 void adf_rl_stop(struct adf_accel_dev *accel_dev) in adf_rl_stop() argument
1141 if (!accel_dev->rate_limiting) in adf_rl_stop()
1144 adf_sysfs_rl_rm(accel_dev); in adf_rl_stop()
1145 free_all_sla(accel_dev); in adf_rl_stop()
1148 void adf_rl_exit(struct adf_accel_dev *accel_dev) in adf_rl_exit() argument
1150 if (!accel_dev->rate_limiting) in adf_rl_exit()
1153 kfree(accel_dev->rate_limiting); in adf_rl_exit()
1154 accel_dev->rate_limiting = NULL; in adf_rl_exit()
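End to end, the four entry points form the usual init/start/stop/exit lifecycle, with every later stage guarded by accel_dev->rate_limiting (lines 1091, 1141, 1150). A hedged caller's-eye sketch; qat_rl_bring_up()/qat_rl_bring_down() are hypothetical wrappers, the ordering follows the matches:

static int qat_rl_bring_up(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_rl_init(accel_dev);	/* allocates accel_dev->rate_limiting */
	if (ret)
		return ret;

	ret = adf_rl_start(accel_dev);	/* FW init msg, default nodes, sysfs */
	if (ret)
		/* start's error path already freed the context (1133-1134);
		 * the NULL guard at 1150 makes this call a no-op then. */
		adf_rl_exit(accel_dev);

	return ret;
}

static void qat_rl_bring_down(struct adf_accel_dev *accel_dev)
{
	adf_rl_stop(accel_dev);		/* sysfs removal + free_all_sla() */
	adf_rl_exit(accel_dev);		/* kfree + NULL the context */
}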