Lines Matching refs:ccp
(Each entry gives the line number in the source file, the matched line, and the enclosing function; "local" or "argument" notes whether ccp is a local variable or a parameter there.)

27 	struct ccp_device *ccp;  in ccp_lsb_alloc()  local
42 ccp = cmd_q->ccp; in ccp_lsb_alloc()
44 mutex_lock(&ccp->sb_mutex); in ccp_lsb_alloc()
46 start = (u32)bitmap_find_next_zero_area(ccp->lsbmap, in ccp_lsb_alloc()
51 bitmap_set(ccp->lsbmap, start, count); in ccp_lsb_alloc()
53 mutex_unlock(&ccp->sb_mutex); in ccp_lsb_alloc()
57 ccp->sb_avail = 0; in ccp_lsb_alloc()
59 mutex_unlock(&ccp->sb_mutex); in ccp_lsb_alloc()
62 if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) in ccp_lsb_alloc()
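
The fragments above show the shared-pool path of ccp_lsb_alloc(): claim `count` contiguous slots from the device-wide LSB bitmap under ccp->sb_mutex, or sleep on ccp->sb_queue until a free is signalled. A minimal sketch of that loop, assuming the pool spans MAX_LSB_CNT * LSB_SIZE slots and eliding the per-queue private-map fast path that precedes it:

static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
	struct ccp_device *ccp = cmd_q->ccp;
	u32 start;

	for (;;) {
		mutex_lock(&ccp->sb_mutex);

		/* Look for `count` contiguous free slots in the shared map */
		start = (u32)bitmap_find_next_zero_area(ccp->lsbmap,
							MAX_LSB_CNT * LSB_SIZE,
							0, count, 0);
		if (start < MAX_LSB_CNT * LSB_SIZE) {
			bitmap_set(ccp->lsbmap, start, count);
			mutex_unlock(&ccp->sb_mutex);
			return start;
		}

		/* Pool exhausted: drop the lock and wait for a free */
		ccp->sb_avail = 0;
		mutex_unlock(&ccp->sb_mutex);

		/* Interrupted by a signal: callers treat 0 as "no slot" */
		if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
			return 0;
	}
}
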
81 struct ccp_device *ccp = cmd_q->ccp; in ccp_lsb_free() local
83 mutex_lock(&ccp->sb_mutex); in ccp_lsb_free()
84 bitmap_clear(ccp->lsbmap, start, count); in ccp_lsb_free()
85 ccp->sb_avail = 1; in ccp_lsb_free()
86 mutex_unlock(&ccp->sb_mutex); in ccp_lsb_free()
87 wake_up_interruptible_all(&ccp->sb_queue); in ccp_lsb_free()
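
ccp_lsb_free() is the converse, and its shared-LSB branch is fully visible above: return the slots, raise the availability flag while still holding the mutex, then wake every waiter. A sketch (the private-LSB branch for slots owned by a single queue is elided):

static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
			 unsigned int count)
{
	struct ccp_device *ccp = cmd_q->ccp;

	mutex_lock(&ccp->sb_mutex);
	bitmap_clear(ccp->lsbmap, start, count);	/* release the slots */
	ccp->sb_avail = 1;				/* satisfy the wait condition */
	mutex_unlock(&ccp->sb_mutex);

	wake_up_interruptible_all(&ccp->sb_queue);
}

Because sb_avail is set before the mutex is dropped, a waiter woken from ccp->sb_queue re-checks the condition under the mutex and cannot miss the transition; multiple waiters then simply race for the freed slots.
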
265 ccp_log_error(cmd_q->ccp, in ccp5_do_cmd()
602 dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n", in ccp_find_lsb_regions()
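
Only the dev_dbg() line of ccp_find_lsb_regions() matches refs:ccp, so everything else below is a hedged reconstruction: this is where each queue's cmd_q->lsbmask, consumed by the LSB-assignment code further down, gets built from a hardware status word. Names such as LSB_REGION_WIDTH and MAX_LSB_CNT are assumptions from context:

static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
{
	int q_mask = 1 << cmd_q->id;
	int queues = 0;
	int j;

	/* Walk the per-region fields of the status word; a set q_mask
	 * bit means this queue may access that LSB region. Region 0 is
	 * privileged and skipped. (Reconstruction; not in the listing.)
	 */
	status >>= LSB_REGION_WIDTH;
	for (j = 1; j < MAX_LSB_CNT; j++) {
		if (status & q_mask)
			bitmap_set(cmd_q->lsbmask, j, 1);
		status >>= LSB_REGION_WIDTH;
	}

	queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
	dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n",
		cmd_q->id, queues);

	return queues ? 0 : -EINVAL;
}
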
608 static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp, in ccp_find_and_assign_lsb_to_q() argument
627 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp_find_and_assign_lsb_to_q()
628 struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; in ccp_find_and_assign_lsb_to_q()
643 dev_dbg(ccp->dev, in ccp_find_and_assign_lsb_to_q()
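
ccp_find_and_assign_lsb_to_q() hands private LSB regions to queues one scarcity class at a time: on each call it serves only queues whose access mask contains exactly lsb_cnt regions, giving each the first region still present in the public pool. A sketch assuming MAX_LSB_CNT and the cmd_q->lsb/lsbmask fields seen elsewhere in this listing:

static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
					int lsb_cnt, int n_lsbs,
					unsigned long *lsb_pub)
{
	DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
	int bitno;
	int i;

	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

		/* Serve only queues with exactly lsb_cnt candidates on
		 * this pass; more constrained queues were served first.
		 */
		if (bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT) != lsb_cnt)
			continue;

		bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT);
		bitno = find_first_bit(qlsb, MAX_LSB_CNT);
		while (bitno < MAX_LSB_CNT) {
			if (test_bit(bitno, lsb_pub)) {
				/* Claim it and remove it from the pool */
				cmd_q->lsb = bitno;
				bitmap_clear(lsb_pub, bitno, 1);
				dev_dbg(ccp->dev,
					"Queue %d gets LSB %d\n", i, bitno);
				break;
			}
			bitmap_clear(qlsb, bitno, 1);
			bitno = find_first_bit(qlsb, MAX_LSB_CNT);
		}
		if (bitno >= MAX_LSB_CNT)
			return -EINVAL;	/* nothing left for this queue */
		n_lsbs--;
	}

	return n_lsbs;
}
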
665 static int ccp_assign_lsbs(struct ccp_device *ccp) in ccp_assign_lsbs() argument
677 for (i = 0; i < ccp->cmd_q_count; i++) in ccp_assign_lsbs()
679 lsb_pub, ccp->cmd_q[i].lsbmask, in ccp_assign_lsbs()
684 if (n_lsbs >= ccp->cmd_q_count) { in ccp_assign_lsbs()
694 rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs, in ccp_assign_lsbs()
712 bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE); in ccp_assign_lsbs()
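
ccp_assign_lsbs() drives the policy: OR all per-queue masks into one public bitmap; if there are at least as many usable regions as queues, give every queue a private region by calling ccp_find_and_assign_lsb_to_q() with lsb_cnt rising from 1 (most constrained queues first); finally, any region absent from the public mask, including the freshly privatized ones, is pre-marked in ccp->lsbmap so the shared allocator can never hand out its slots. A sketch under the same MAX_LSB_CNT assumption:

static int ccp_assign_lsbs(struct ccp_device *ccp)
{
	DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
	int n_lsbs;
	int i, lsb_cnt, bitno;
	int rc;

	bitmap_zero(lsb_pub, MAX_LSB_CNT);

	/* Aggregate every queue's access mask to count distinct regions */
	for (i = 0; i < ccp->cmd_q_count; i++)
		bitmap_or(lsb_pub,
			  lsb_pub, ccp->cmd_q[i].lsbmask,
			  MAX_LSB_CNT);
	n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);

	if (n_lsbs >= ccp->cmd_q_count) {
		/* Enough for one private region per queue */
		for (lsb_cnt = 1;
		     n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
		     lsb_cnt++) {
			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt,
							  n_lsbs, lsb_pub);
			if (rc < 0)
				return rc;
			n_lsbs = rc;
		}
	}

	/* Regions no longer (or never) public cannot back shared slots:
	 * mark their whole LSB_SIZE range as in use up front.
	 */
	for_each_clear_bit(bitno, lsb_pub, MAX_LSB_CNT)
		bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);

	return 0;
}
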
720 static void ccp5_disable_queue_interrupts(struct ccp_device *ccp) in ccp5_disable_queue_interrupts() argument
724 for (i = 0; i < ccp->cmd_q_count; i++) in ccp5_disable_queue_interrupts()
725 iowrite32(0x0, ccp->cmd_q[i].reg_int_enable); in ccp5_disable_queue_interrupts()
728 static void ccp5_enable_queue_interrupts(struct ccp_device *ccp) in ccp5_enable_queue_interrupts() argument
732 for (i = 0; i < ccp->cmd_q_count; i++) in ccp5_enable_queue_interrupts()
733 iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable); in ccp5_enable_queue_interrupts()
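
The two interrupt helpers are complete in the fragments apart from their declarations; spelled out, they are symmetric loops writing either 0 or the SUPPORTED_INTERRUPTS mask to each active queue's interrupt-enable register:

static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
{
	unsigned int i;

	for (i = 0; i < ccp->cmd_q_count; i++)
		iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
}

static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
{
	unsigned int i;

	for (i = 0; i < ccp->cmd_q_count; i++)
		iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
}
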
738 struct ccp_device *ccp = (struct ccp_device *)data; in ccp5_irq_bh() local
742 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp5_irq_bh()
743 struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; in ccp5_irq_bh()
763 ccp5_enable_queue_interrupts(ccp); in ccp5_irq_bh()
768 struct ccp_device *ccp = (struct ccp_device *)data; in ccp5_irq_handler() local
770 ccp5_disable_queue_interrupts(ccp); in ccp5_irq_handler()
771 ccp->total_interrupts++; in ccp5_irq_handler()
772 if (ccp->use_tasklet) in ccp5_irq_handler()
773 tasklet_schedule(&ccp->irq_tasklet); in ccp5_irq_handler()
775 ccp5_irq_bh((unsigned long)ccp); in ccp5_irq_handler()
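
The handler pair implements the usual hard-IRQ/bottom-half split: the hard handler masks every queue interrupt, counts the event, and either schedules the tasklet or runs the bottom half inline; the bottom half services each queue and unmasks on the way out. The per-queue servicing in the middle is not visible in this listing, so the reg_interrupt_status, int_rcvd, and int_queue names below are assumptions from the wider driver:

static void ccp5_irq_bh(unsigned long data)
{
	struct ccp_device *ccp = (struct ccp_device *)data;
	unsigned int i;

	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];
		u32 status = ioread32(cmd_q->reg_interrupt_status);

		if (status) {
			cmd_q->int_status = status;

			/* Ack the interrupt, then wake whoever waits on
			 * this queue's completion. (Assumed fields.)
			 */
			iowrite32(status, cmd_q->reg_interrupt_status);
			cmd_q->int_rcvd = 1;
			wake_up_interruptible(&cmd_q->int_queue);
		}
	}

	/* Every queue serviced: unmask again */
	ccp5_enable_queue_interrupts(ccp);
}

static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
	struct ccp_device *ccp = (struct ccp_device *)data;

	/* Mask first so the line stays quiet until the BH has run */
	ccp5_disable_queue_interrupts(ccp);
	ccp->total_interrupts++;
	if (ccp->use_tasklet)
		tasklet_schedule(&ccp->irq_tasklet);
	else
		ccp5_irq_bh((unsigned long)ccp);

	return IRQ_HANDLED;
}
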
779 static int ccp5_init(struct ccp_device *ccp) in ccp5_init() argument
781 struct device *dev = ccp->dev; in ccp5_init()
791 qmr = ioread32(ccp->io_regs + Q_MASK_REG); in ccp5_init()
804 for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { in ccp5_init()
810 ccp->name, i); in ccp5_init()
820 cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; in ccp5_init()
821 ccp->cmd_q_count++; in ccp5_init()
823 cmd_q->ccp = ccp; in ccp5_init()
844 cmd_q->reg_control = ccp->io_regs + in ccp5_init()
867 if (ccp->cmd_q_count == 0) { in ccp5_init()
874 ccp5_disable_queue_interrupts(ccp); in ccp5_init()
875 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp5_init()
876 cmd_q = &ccp->cmd_q[i]; in ccp5_init()
890 ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp); in ccp5_init()
896 if (ccp->use_tasklet) in ccp5_init()
897 tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh, in ccp5_init()
898 (unsigned long)ccp); in ccp5_init()
902 status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); in ccp5_init()
903 status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); in ccp5_init()
904 iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET); in ccp5_init()
905 iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET); in ccp5_init()
910 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp5_init()
914 cmd_q = &ccp->cmd_q[i]; in ccp5_init()
934 ret = ccp_assign_lsbs(ccp); in ccp5_init()
941 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp5_init()
942 ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2); in ccp5_init()
943 ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2); in ccp5_init()
948 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp5_init()
951 cmd_q = &ccp->cmd_q[i]; in ccp5_init()
954 "%s-q%u", ccp->name, cmd_q->id); in ccp5_init()
966 ccp5_enable_queue_interrupts(ccp); in ccp5_init()
970 ccp_add_device(ccp); in ccp5_init()
972 ret = ccp_register_rng(ccp); in ccp5_init()
977 ret = ccp_dmaengine_register(ccp); in ccp5_init()
983 ccp5_debugfs_setup(ccp); in ccp5_init()
989 ccp_unregister_rng(ccp); in ccp5_init()
992 for (i = 0; i < ccp->cmd_q_count; i++) in ccp5_init()
993 if (ccp->cmd_q[i].kthread) in ccp5_init()
994 kthread_stop(ccp->cmd_q[i].kthread); in ccp5_init()
997 sp_free_ccp_irq(ccp->sp, ccp); in ccp5_init()
1000 for (i = 0; i < ccp->cmd_q_count; i++) in ccp5_init()
1001 dma_pool_destroy(ccp->cmd_q[i].dma_pool); in ccp5_init()
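
Read in order, the ccp5_init() fragments describe a fixed bring-up sequence. A condensed sketch of that order, with per-queue register setup and the full error-unwind chain (stop kthreads, free the IRQ, destroy the dma_pools, as the trailing fragments show) elided into comments:

static int ccp5_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	u32 qmr, status_lo, status_hi;
	unsigned int i;
	int ret;

	/* 1. Claim the hardware queues advertised by Q_MASK_REG, up to
	 *    max_q_count, creating a dma_pool and mapping the control
	 *    registers for each (details elided).
	 */
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; (i < MAX_HW_QUEUES) &&
		    (ccp->cmd_q_count < ccp->max_q_count); i++) {
		if (!(qmr & (1 << i)))
			continue;

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;
		cmd_q->ccp = ccp;
		/* ... dma_pool_create(), queue memory, reg_control ... */
	}
	if (ccp->cmd_q_count == 0)
		return -EIO;

	/* 2. Keep queue interrupts masked while wiring up the handler
	 *    and, if requested, the tasklet bottom half.
	 */
	ccp5_disable_queue_interrupts(ccp);
	ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
	if (ret)
		return ret;		/* real code unwinds the pools */
	if (ccp->use_tasklet)
		tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
			     (unsigned long)ccp);

	/* 3. Republish the private LSB masks as public, partition the
	 *    LSB regions, and reserve two slots per queue for key and
	 *    context storage.
	 */
	status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
	status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
	iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
	iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
	ret = ccp_assign_lsbs(ccp);
	if (ret)
		return ret;		/* unwind elided */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
		ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
	}

	/* 4. Start a "%s-q%u" kthread per queue, unmask interrupts, and
	 *    only then register externally: ccp_add_device(),
	 *    ccp_register_rng(), ccp_dmaengine_register(),
	 *    ccp5_debugfs_setup().
	 */
	/* ... kthread_run() loop and registrations elided ... */

	return 0;
}
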
1006 static void ccp5_destroy(struct ccp_device *ccp) in ccp5_destroy() argument
1013 ccp_dmaengine_unregister(ccp); in ccp5_destroy()
1016 ccp_unregister_rng(ccp); in ccp5_destroy()
1019 ccp_del_device(ccp); in ccp5_destroy()
1030 ccp5_disable_queue_interrupts(ccp); in ccp5_destroy()
1031 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp5_destroy()
1032 cmd_q = &ccp->cmd_q[i]; in ccp5_destroy()
1044 for (i = 0; i < ccp->cmd_q_count; i++) in ccp5_destroy()
1045 if (ccp->cmd_q[i].kthread) in ccp5_destroy()
1046 kthread_stop(ccp->cmd_q[i].kthread); in ccp5_destroy()
1048 sp_free_ccp_irq(ccp->sp, ccp); in ccp5_destroy()
1051 while (!list_empty(&ccp->cmd)) { in ccp5_destroy()
1053 cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); in ccp5_destroy()
1057 while (!list_empty(&ccp->backlog)) { in ccp5_destroy()
1059 cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); in ccp5_destroy()
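
ccp5_destroy() unwinds in the reverse of bring-up: dmaengine, then hwrng, then removal from the device list, then masking and clearing queue interrupts and stopping each queue's kthread before freeing the IRQ. The final fragments show the part worth isolating: every command still queued is completed by hand with an error. A sketch of that drain as a hypothetical helper (ccp_fail_pending_cmds is not a name from the driver, and the callback signature is assumed):

/* Hypothetical helper: fail everything still on the cmd and backlog
 * lists so no submitter keeps waiting on hardware that is going away.
 */
static void ccp_fail_pending_cmds(struct ccp_device *ccp)
{
	struct ccp_cmd *cmd;

	while (!list_empty(&ccp->cmd)) {
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);	/* assumed signature */
	}
	while (!list_empty(&ccp->backlog)) {
		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
}
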
1065 static void ccp5_config(struct ccp_device *ccp) in ccp5_config() argument
1068 iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET); in ccp5_config()
1071 static void ccp5other_config(struct ccp_device *ccp) in ccp5other_config() argument
1078 iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); in ccp5other_config()
1079 iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); in ccp5other_config()
1081 rnd = ioread32(ccp->io_regs + TRNG_OUT_REG); in ccp5other_config()
1082 iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET); in ccp5other_config()
1085 iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET); in ccp5other_config()
1086 iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET); in ccp5other_config()
1087 iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET); in ccp5other_config()
1089 iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); in ccp5other_config()
1090 iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); in ccp5other_config()
1092 iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET); in ccp5other_config()
1094 ccp5_config(ccp); in ccp5other_config()
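
ccp5other_config() appears to be the variant for parts where the driver owns the whole device: it programs fixed values into the TRNG control, queue mask/priority, command timeout, LSB private mask, and clock gating registers, seeds the AES mask register from a TRNG read, and finishes by calling ccp5_config() for the common request-ID setup. Restated from the fragments (the listing shows a single TRNG read/write pair; any repetition around it is not visible here):

static void ccp5other_config(struct ccp_device *ccp)
{
	u32 rnd;

	/* TRNG control and engine configuration (fixed magic values) */
	iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET);
	iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET);

	/* Seed the AES mask register from the TRNG output */
	rnd = ioread32(ccp->io_regs + TRNG_OUT_REG);
	iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET);

	/* Queue visibility, priority, and command timeout */
	iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET);
	iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET);
	iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET);

	/* LSB private mask registers (fixed values from the fragments) */
	iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
	iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);

	iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET);

	/* Shared CCP5 configuration (zeroes CMD5_REQID_CONFIG_OFFSET) */
	ccp5_config(ccp);
}
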