1 // SPDX-License-Identifier: GPL-2.0+
2
3 #define LOG_CATEGORY UCLASS_AES
4
5 #include <dm.h>
6 #include <malloc.h>
7 #include <log.h>
8 #include <uboot_aes.h>
9 #include <linux/string.h>
10
dm_aes_get_available_key_slots(struct udevice * dev)11 int dm_aes_get_available_key_slots(struct udevice *dev)
12 {
13 const struct aes_ops *ops;
14
15 if (!dev)
16 return -ENODEV;
17
18 ops = aes_get_ops(dev);
19
20 if (!ops->available_key_slots)
21 return -ENOSYS;
22
23 return ops->available_key_slots(dev);
24 }
25
/**
 * dm_aes_select_key_slot() - Make a key slot the active one for the engine
 * @dev: AES engine udevice
 * @key_size: key size in bits selected for the slot
 * @slot: index of the slot to activate
 *
 * Return: 0 or driver error code, -ENODEV if @dev is NULL, or -ENOSYS if
 * the driver does not implement the operation.
 */
int dm_aes_select_key_slot(struct udevice *dev, u32 key_size, u8 slot)
{
	const struct aes_ops *ops;

	if (!dev)
		return -ENODEV;

	ops = aes_get_ops(dev);

	return ops->select_key_slot ? ops->select_key_slot(dev, key_size, slot)
				    : -ENOSYS;
}
40
/**
 * dm_aes_set_key_for_key_slot() - Load a key into a specific key slot
 * @dev: AES engine udevice
 * @key_size: key size in bits
 * @key: key material to load
 * @slot: index of the slot to program
 *
 * Return: 0 or driver error code, -ENODEV if @dev is NULL, or -ENOSYS if
 * the driver does not implement the operation.
 */
int dm_aes_set_key_for_key_slot(struct udevice *dev, u32 key_size, u8 *key, u8 slot)
{
	const struct aes_ops *ops;

	if (!dev)
		return -ENODEV;

	ops = aes_get_ops(dev);

	return ops->set_key_for_key_slot ?
		ops->set_key_for_key_slot(dev, key_size, key, slot) : -ENOSYS;
}
55
/**
 * dm_aes_ecb_encrypt() - ECB-encrypt a run of AES blocks with the active key
 * @dev: AES engine udevice
 * @src: source buffer (num_aes_blocks * AES block size bytes)
 * @dst: destination buffer, same size as @src
 * @num_aes_blocks: number of AES blocks to process
 *
 * Return: 0 or driver error code, -ENODEV if @dev is NULL, or -ENOSYS if
 * the driver does not implement the operation.
 */
int dm_aes_ecb_encrypt(struct udevice *dev, u8 *src, u8 *dst, u32 num_aes_blocks)
{
	const struct aes_ops *ops;

	if (!dev)
		return -ENODEV;

	ops = aes_get_ops(dev);

	return ops->aes_ecb_encrypt ?
		ops->aes_ecb_encrypt(dev, src, dst, num_aes_blocks) : -ENOSYS;
}
70
/**
 * dm_aes_ecb_decrypt() - ECB-decrypt a run of AES blocks with the active key
 * @dev: AES engine udevice
 * @src: source buffer (num_aes_blocks * AES block size bytes)
 * @dst: destination buffer, same size as @src
 * @num_aes_blocks: number of AES blocks to process
 *
 * Return: 0 or driver error code, -ENODEV if @dev is NULL, or -ENOSYS if
 * the driver does not implement the operation.
 */
int dm_aes_ecb_decrypt(struct udevice *dev, u8 *src, u8 *dst, u32 num_aes_blocks)
{
	const struct aes_ops *ops;

	if (!dev)
		return -ENODEV;

	ops = aes_get_ops(dev);

	return ops->aes_ecb_decrypt ?
		ops->aes_ecb_decrypt(dev, src, dst, num_aes_blocks) : -ENOSYS;
}
85
/**
 * dm_aes_cbc_encrypt() - CBC-encrypt a run of AES blocks with the active key
 * @dev: AES engine udevice
 * @iv: initialisation vector for the first block
 * @src: source buffer (num_aes_blocks * AES block size bytes)
 * @dst: destination buffer, same size as @src
 * @num_aes_blocks: number of AES blocks to process
 *
 * Return: 0 or driver error code, -ENODEV if @dev is NULL, or -ENOSYS if
 * the driver does not implement the operation.
 */
int dm_aes_cbc_encrypt(struct udevice *dev, u8 *iv, u8 *src, u8 *dst, u32 num_aes_blocks)
{
	const struct aes_ops *ops;

	if (!dev)
		return -ENODEV;

	ops = aes_get_ops(dev);

	return ops->aes_cbc_encrypt ?
		ops->aes_cbc_encrypt(dev, iv, src, dst, num_aes_blocks) : -ENOSYS;
}
100
/**
 * dm_aes_cbc_decrypt() - CBC-decrypt a run of AES blocks with the active key
 * @dev: AES engine udevice
 * @iv: initialisation vector for the first block
 * @src: source buffer (num_aes_blocks * AES block size bytes)
 * @dst: destination buffer, same size as @src
 * @num_aes_blocks: number of AES blocks to process
 *
 * Return: 0 or driver error code, -ENODEV if @dev is NULL, or -ENOSYS if
 * the driver does not implement the operation.
 */
int dm_aes_cbc_decrypt(struct udevice *dev, u8 *iv, u8 *src, u8 *dst, u32 num_aes_blocks)
{
	const struct aes_ops *ops;

	if (!dev)
		return -ENODEV;

	ops = aes_get_ops(dev);

	return ops->aes_cbc_decrypt ?
		ops->aes_cbc_decrypt(dev, iv, src, dst, num_aes_blocks) : -ENOSYS;
}
115
/**
 * left_shift_vector() - Shift a big-endian byte vector left by one bit
 * @in: input vector, most significant byte first
 * @out: output vector, may be the same buffer as @in
 * @size: vector length in bytes
 *
 * The bit shifted out of each byte's MSB is carried into the next more
 * significant byte; the bit shifted out of the whole vector is dropped
 * (the caller checks in[0]'s MSB itself, per RFC 4493).
 */
static void left_shift_vector(u8 *in, u8 *out, int size)
{
	int carry = 0;
	int i;

	for (i = size - 1; i >= 0; i--) {
		/*
		 * Capture the outgoing MSB before writing out[i]: the
		 * original read in[i] after the store, which returned the
		 * already-shifted value whenever in == out.
		 */
		int msb = in[i] >> 7;

		out[i] = (in[i] << 1) | carry;
		carry = msb;
	}
}
126
/**
 * dm_aes_cmac() - Compute an AES-CMAC (RFC 4493) over whole AES blocks
 * @dev: AES engine udevice
 * @src: message to authenticate, num_aes_blocks * AES_BLOCK_LENGTH bytes
 * @dst: output buffer for the 16-byte MAC
 * @num_aes_blocks: number of complete AES blocks in @src; must be >= 1
 *
 * Uses the key currently loaded in the engine. Only block-aligned messages
 * are supported, so only the K1 subkey path of RFC 4493 is needed.
 *
 * Return: 0 on success, -1 on any failure (bad argument, allocation
 * failure or engine error).
 */
int dm_aes_cmac(struct udevice *dev, u8 *src, u8 *dst, u32 num_aes_blocks)
{
	const u8 AES_CMAC_CONST_RB = 0x87; /* from RFC 4493, Figure 2.2 */
	const u32 TMP_BUFFER_LEN = 128;
	u8 tmp_block[AES128_KEY_LENGTH] = { };
	u8 k1[AES128_KEY_LENGTH];
	u8 *tmp_buffer;
	int ret;

	log_debug("%s: 0x%p -> %p blocks %u\n", __func__, src, dst, num_aes_blocks);

	if (!num_aes_blocks) {
		log_debug("%s: called with 0 blocks!\n", __func__);
		return -1;
	}

	/* Compute K1 constant needed by AES-CMAC calculation: L = AES-128(K, 0) */
	ret = dm_aes_cbc_encrypt(dev, (u8 *)AES_ZERO_BLOCK, (u8 *)AES_ZERO_BLOCK, tmp_block, 1);
	if (ret)
		return -1;

	left_shift_vector(tmp_block, k1, AES_BLOCK_LENGTH);

	if ((tmp_block[0] >> 7) != 0) /* get MSB of L */
		k1[AES128_KEY_LENGTH - 1] ^= AES_CMAC_CONST_RB;

	/* Set what will be the initial IV as zero */
	memset(tmp_block, 0, AES_BLOCK_LENGTH);

	/* Process all blocks except last by calling engine several times per dma buffer size */
	if (num_aes_blocks > 1) {
		tmp_buffer = malloc(AES_BLOCK_LENGTH * min(num_aes_blocks - 1, TMP_BUFFER_LEN));
		if (!tmp_buffer) /* original dereferenced an unchecked malloc */
			return -1;

		while (num_aes_blocks > 1) {
			u32 blocks = min(num_aes_blocks - 1, TMP_BUFFER_LEN);

			/* Encrypt the current remaining set of blocks that fits in tmp buffer */
			ret = dm_aes_cbc_encrypt(dev, tmp_block, src, tmp_buffer, blocks);
			if (ret) {
				/* original leaked tmp_buffer on this path */
				free(tmp_buffer);
				return -1;
			}

			num_aes_blocks -= blocks;
			src += blocks * AES_BLOCK_LENGTH;

			/* Copy the last encrypted block to tmp_block as IV */
			memcpy(tmp_block, tmp_buffer + ((blocks - 1) * AES_BLOCK_LENGTH),
			       AES_BLOCK_LENGTH);
		}
		free(tmp_buffer);
	}

	if (num_aes_blocks != 1) {
		log_debug("%s: left with %u blocks! must be 1\n", __func__, num_aes_blocks);
		return -1;
	}

	/* XOR last IV with K1 */
	aes_apply_cbc_chain_data(tmp_block, k1, tmp_block);

	/* Encrypt the last src block already with tmp_block as IV and output to dst */
	return dm_aes_cbc_encrypt(dev, tmp_block, src, dst, 1);
}
188
/* Driver-model uclass entry: AES engine drivers bind against UCLASS_AES */
UCLASS_DRIVER(aes) = {
	.id = UCLASS_AES,
	.name = "aes",
};
193