/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */
#ifndef _CRYPTO_ACOMP_H
#define _CRYPTO_ACOMP_H

#include <linux/atomic.h>
#include <linux/args.h>
#include <linux/compiler_types.h>
#include <linux/container_of.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>

/* Set this bit if source is virtual address instead of SG list. */
#define CRYPTO_ACOMP_REQ_SRC_VIRT	0x00000002

/* Set this bit if the virtual address source cannot be used for DMA. */
#define CRYPTO_ACOMP_REQ_SRC_NONDMA	0x00000004

/* Set this bit if destination is virtual address instead of SG list. */
#define CRYPTO_ACOMP_REQ_DST_VIRT	0x00000008

/* Set this bit if the virtual address destination cannot be used for DMA. */
#define CRYPTO_ACOMP_REQ_DST_NONDMA	0x00000010

/* Private flags that should not be touched by the user. */
#define CRYPTO_ACOMP_REQ_PRIVATE \
	(CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | \
	 CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA)

#define CRYPTO_ACOMP_DST_MAX	131072

#define MAX_SYNC_COMP_REQSIZE	0

#define ACOMP_REQUEST_ON_STACK(name, tfm) \
	char __##name##_req[sizeof(struct acomp_req) + \
			    MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
	struct acomp_req *name = acomp_request_on_stack_init( \
		__##name##_req, (tfm))

#define ACOMP_REQUEST_CLONE(name, gfp) \
	acomp_request_clone(name, sizeof(__##name##_req), gfp)

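/*
 * Example (illustrative sketch, not taken from kernel code): using an
 * on-stack request with a synchronous (!acomp_is_async()) transform.  The
 * scatterlists and lengths (src_sg, dst_sg, slen, dlen) are placeholders
 * assumed to be set up by the caller.
 *
 *	ACOMP_REQUEST_ON_STACK(req, tfm);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				   crypto_req_done, &wait);
 *	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *
 * No acomp_request_free() is needed here: the request lives on the stack and
 * acomp_request_free() is a no-op for on-stack requests.
 */
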
struct acomp_req;
struct folio;

struct acomp_req_chain {
	crypto_completion_t compl;
	void *data;
	struct scatterlist ssg;
	struct scatterlist dsg;
	union {
		const u8 *src;
		struct folio *sfolio;
	};
	union {
		u8 *dst;
		struct folio *dfolio;
	};
	u32 flags;
};

/**
 * struct acomp_req - asynchronous (de)compression request
 *
 * @base:	Common attributes for asynchronous crypto requests
 * @src:	Source scatterlist
 * @dst:	Destination scatterlist
 * @svirt:	Source virtual address
 * @dvirt:	Destination virtual address
 * @slen:	Size of the input buffer
 * @dlen:	Size of the output buffer and number of bytes produced
 * @chain:	Private API code data, do not use
 * @__ctx:	Start of private context data
 */
struct acomp_req {
	struct crypto_async_request base;
	union {
		struct scatterlist *src;
		const u8 *svirt;
	};
	union {
		struct scatterlist *dst;
		u8 *dvirt;
	};
	unsigned int slen;
	unsigned int dlen;

	struct acomp_req_chain chain;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

/**
 * struct crypto_acomp - user-instantiated objects which encapsulate
 * algorithms and core processing logic
 *
 * @compress:	Function performs a compress operation
 * @decompress:	Function performs a de-compress operation
 * @reqsize:	Context size for (de)compression requests
 * @base:	Common crypto API algorithm data structure
 */
struct crypto_acomp {
	int (*compress)(struct acomp_req *req);
	int (*decompress)(struct acomp_req *req);
	unsigned int reqsize;
	struct crypto_tfm base;
};

#define COMP_ALG_COMMON {		\
	struct crypto_alg base;		\
}
struct comp_alg_common COMP_ALG_COMMON;

/**
 * DOC: Asynchronous Compression API
 *
 * The Asynchronous Compression API is used with the algorithms of type
 * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
 */

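/*
 * Typical usage (illustrative sketch only, not lifted from kernel code):
 * allocate a transform and a request, describe the buffers with
 * scatterlists, and wait synchronously for completion via crypto_wait_req().
 * The algorithm name "deflate" and the src_buf/dst_buf buffers with their
 * lengths are assumptions made for this example.
 *
 *	struct crypto_acomp *tfm;
 *	struct acomp_req *req;
 *	struct scatterlist src_sg, dst_sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	req = acomp_request_alloc(tfm);
 *	if (!req) {
 *		crypto_free_acomp(tfm);
 *		return -ENOMEM;
 *	}
 *
 *	sg_init_one(&src_sg, src_buf, src_len);
 *	sg_init_one(&dst_sg, dst_buf, dst_len);
 *	acomp_request_set_params(req, &src_sg, &dst_sg, src_len, dst_len);
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				   CRYPTO_TFM_REQ_MAY_SLEEP,
 *				   crypto_req_done, &wait);
 *
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *	if (!err)
 *		pr_debug("compressed %u bytes into %u\n", src_len, req->dlen);
 *
 *	acomp_request_free(req);
 *	crypto_free_acomp(tfm);
 *	return err;
 */
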
/**
 * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
 * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
 *		compression algorithm e.g. "deflate"
 * @type:	specifies the type of the algorithm
 * @mask:	specifies the mask for the algorithm
 *
 * Allocate a handle for a compression algorithm. The returned struct
 * crypto_acomp is the handle that is required for any subsequent
 * API invocation for the compression operations.
 *
 * Return:	allocated handle in case of success; IS_ERR() is true in case
 *		of an error, PTR_ERR() returns the error code.
 */
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask);
/**
 * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node
 * @alg_name:	is the cra_name / name or cra_driver_name / driver name of the
 *		compression algorithm e.g. "deflate"
 * @type:	specifies the type of the algorithm
 * @mask:	specifies the mask for the algorithm
 * @node:	specifies the NUMA node the ZIP hardware belongs to
 *
 * Allocate a handle for a compression algorithm. Drivers should try to use
 * (de)compressors on the specified NUMA node.
 * The returned struct crypto_acomp is the handle that is required for any
 * subsequent API invocation for the compression operations.
 *
 * Return:	allocated handle in case of success; IS_ERR() is true in case
 *		of an error, PTR_ERR() returns the error code.
 */
struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					     u32 mask, int node);

static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
{
	return &tfm->base;
}

static inline struct comp_alg_common *__crypto_comp_alg_common(
	struct crypto_alg *alg)
{
	return container_of(alg, struct comp_alg_common, base);
}

static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_acomp, base);
}

static inline struct comp_alg_common *crypto_comp_alg_common(
	struct crypto_acomp *tfm)
{
	return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
}

static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
{
	return tfm->reqsize;
}

static inline void acomp_request_set_tfm(struct acomp_req *req,
					 struct crypto_acomp *tfm)
{
	crypto_request_set_tfm(&req->base, crypto_acomp_tfm(tfm));
}

static inline bool acomp_is_async(struct crypto_acomp *tfm)
{
	return crypto_comp_alg_common(tfm)->base.cra_flags &
	       CRYPTO_ALG_ASYNC;
}

static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
{
	return __crypto_acomp_tfm(req->base.tfm);
}

/**
 * crypto_free_acomp() -- free ACOMPRESS tfm handle
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 *
 * If @tfm is a NULL or error pointer, this function does nothing.
 */
static inline void crypto_free_acomp(struct crypto_acomp *tfm)
{
	crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
}

static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_ACOMPRESS;
	mask |= CRYPTO_ALG_TYPE_ACOMPRESS_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline const char *crypto_acomp_alg_name(struct crypto_acomp *tfm)
{
	return crypto_tfm_alg_name(crypto_acomp_tfm(tfm));
}

static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm)
{
	return crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
}

/**
 * acomp_request_alloc() -- allocates asynchronous (de)compression request
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 * @gfp:	gfp to pass to kzalloc (defaults to GFP_KERNEL)
 *
 * Return:	allocated handle in case of success or NULL in case of an error
 */
static inline struct acomp_req *acomp_request_alloc_extra_noprof(
	struct crypto_acomp *tfm, size_t extra, gfp_t gfp)
{
	struct acomp_req *req;
	size_t len;

	len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
	if (check_add_overflow(len, extra, &len))
		return NULL;

	req = kzalloc_noprof(len, gfp);
	if (likely(req))
		acomp_request_set_tfm(req, tfm);
	return req;
}
#define acomp_request_alloc_noprof(tfm, ...) \
	CONCATENATE(acomp_request_alloc_noprof_, COUNT_ARGS(__VA_ARGS__))( \
		tfm, ##__VA_ARGS__)
#define acomp_request_alloc_noprof_0(tfm) \
	acomp_request_alloc_noprof_1(tfm, GFP_KERNEL)
#define acomp_request_alloc_noprof_1(tfm, gfp) \
	acomp_request_alloc_extra_noprof(tfm, 0, gfp)
#define acomp_request_alloc(...)	alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__))

/**
 * acomp_request_alloc_extra() -- allocate acomp request with extra memory
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 * @extra:	amount of extra memory
 * @gfp:	gfp to pass to kzalloc
 *
 * Return:	allocated handle in case of success or NULL in case of an error
 */
#define acomp_request_alloc_extra(...)	alloc_hooks(acomp_request_alloc_extra_noprof(__VA_ARGS__))

static inline void *acomp_request_extra(struct acomp_req *req)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	size_t len;

	len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
	return (void *)((char *)req + len);
}

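/*
 * Example (sketch, with a made-up "struct my_ctx" payload): allocate extra
 * caller-owned memory behind the request and retrieve it later with
 * acomp_request_extra().  The extra area is freed together with the request.
 *
 *	req = acomp_request_alloc_extra(tfm, sizeof(struct my_ctx), GFP_KERNEL);
 *	if (req) {
 *		struct my_ctx *ctx = acomp_request_extra(req);
 *
 *		... use ctx until acomp_request_free(req) ...
 *	}
 */
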
static inline bool acomp_req_on_stack(struct acomp_req *req)
{
	return crypto_req_on_stack(&req->base);
}

/**
 * acomp_request_free() -- zeroize and free asynchronous (de)compression
 *			   request as well as the output buffer if allocated
 *			   inside the algorithm
 *
 * @req:	request to free
 */
static inline void acomp_request_free(struct acomp_req *req)
{
	if (!req || acomp_req_on_stack(req))
		return;
	kfree_sensitive(req);
}

/**
 * acomp_request_set_callback() -- Sets an asynchronous callback
 *
 * Callback will be called when an asynchronous operation on a given
 * request is finished.
 *
 * @req:	request that the callback will be set for
 * @flgs:	specify for instance if the operation may backlog
 * @cmpl:	callback which will be called
 * @data:	private data used by the caller
 */
static inline void acomp_request_set_callback(struct acomp_req *req,
					      u32 flgs,
					      crypto_completion_t cmpl,
					      void *data)
{
	flgs &= ~CRYPTO_ACOMP_REQ_PRIVATE;
	flgs |= req->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
	crypto_request_set_callback(&req->base, flgs, cmpl, data);
}

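/*
 * Example (sketch): a caller-defined completion handler.  The convention
 * assumed here is that the callback receives the private @data pointer plus
 * the operation's error code, and that a backlogged request first signals
 * -EINPROGRESS when it starts executing; "struct my_job" and its members are
 * hypothetical caller context, not part of this API.
 *
 *	static void my_acomp_done(void *data, int err)
 *	{
 *		struct my_job *job = data;
 *
 *		if (err == -EINPROGRESS)
 *			return;
 *		job->err = err;
 *		complete(&job->done);
 *	}
 *
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_acomp_done, job);
 */
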
/**
 * acomp_request_set_params() -- Sets request parameters
 *
 * Sets parameters required by an acomp operation
 *
 * @req:	asynchronous compress request
 * @src:	pointer to input buffer scatterlist
 * @dst:	pointer to output buffer scatterlist. If this is NULL, the
 *		acomp layer will allocate the output memory
 * @slen:	size of the input buffer
 * @dlen:	size of the output buffer. If dst is NULL, this can be used by
 *		the user to specify the maximum amount of memory to allocate
 */
static inline void acomp_request_set_params(struct acomp_req *req,
					    struct scatterlist *src,
					    struct scatterlist *dst,
					    unsigned int slen,
					    unsigned int dlen)
{
	req->src = src;
	req->dst = dst;
	req->slen = slen;
	req->dlen = dlen;

	req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT |
			     CRYPTO_ACOMP_REQ_SRC_NONDMA |
			     CRYPTO_ACOMP_REQ_DST_VIRT |
			     CRYPTO_ACOMP_REQ_DST_NONDMA);
}

/**
 * acomp_request_set_src_sg() -- Sets source scatterlist
 *
 * Sets source scatterlist required by an acomp operation.
 *
 * @req:	asynchronous compress request
 * @src:	pointer to input buffer scatterlist
 * @slen:	size of the input buffer
 */
static inline void acomp_request_set_src_sg(struct acomp_req *req,
					    struct scatterlist *src,
					    unsigned int slen)
{
	req->src = src;
	req->slen = slen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
}

/**
 * acomp_request_set_src_dma() -- Sets DMA source virtual address
 *
 * Sets source virtual address required by an acomp operation.
 * The address must be usable for DMA.
 *
 * @req:	asynchronous compress request
 * @src:	virtual address pointer to input buffer
 * @slen:	size of the input buffer
 */
static inline void acomp_request_set_src_dma(struct acomp_req *req,
					     const u8 *src, unsigned int slen)
{
	req->svirt = src;
	req->slen = slen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
}

/**
 * acomp_request_set_src_nondma() -- Sets non-DMA source virtual address
 *
 * Sets source virtual address required by an acomp operation.
 * The address can not be used for DMA.
 *
 * @req:	asynchronous compress request
 * @src:	virtual address pointer to input buffer
 * @slen:	size of the input buffer
 */
static inline void acomp_request_set_src_nondma(struct acomp_req *req,
						const u8 *src,
						unsigned int slen)
{
	req->svirt = src;
	req->slen = slen;

	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA;
	req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
}

/**
 * acomp_request_set_src_folio() -- Sets source folio
 *
 * Sets source folio required by an acomp operation.
 *
 * @req:	asynchronous compress request
 * @folio:	pointer to input folio
 * @off:	input folio offset
 * @len:	size of the input buffer
 */
static inline void acomp_request_set_src_folio(struct acomp_req *req,
						struct folio *folio, size_t off,
						unsigned int len)
{
	sg_init_table(&req->chain.ssg, 1);
	sg_set_folio(&req->chain.ssg, folio, len, off);
	acomp_request_set_src_sg(req, &req->chain.ssg, len);
}

/**
 * acomp_request_set_dst_sg() -- Sets destination scatterlist
 *
 * Sets destination scatterlist required by an acomp operation.
 *
 * @req:	asynchronous compress request
 * @dst:	pointer to output buffer scatterlist
 * @dlen:	size of the output buffer
 */
static inline void acomp_request_set_dst_sg(struct acomp_req *req,
					    struct scatterlist *dst,
					    unsigned int dlen)
{
	req->dst = dst;
	req->dlen = dlen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
}

/**
 * acomp_request_set_dst_dma() -- Sets DMA destination virtual address
 *
 * Sets destination virtual address required by an acomp operation.
 * The address must be usable for DMA.
 *
 * @req:	asynchronous compress request
 * @dst:	virtual address pointer to output buffer
 * @dlen:	size of the output buffer
 */
static inline void acomp_request_set_dst_dma(struct acomp_req *req,
					     u8 *dst, unsigned int dlen)
{
	req->dvirt = dst;
	req->dlen = dlen;

	req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
	req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
}

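/*
 * Example (sketch): passing linearly mapped, DMA-capable kernel buffers
 * instead of scatterlists.  src_buf/dst_buf and their lengths are assumed to
 * come from the caller (e.g. kmalloc'ed memory); use the _nondma variants
 * below when the buffers cannot be used for DMA (e.g. stack or vmalloc
 * memory).
 *
 *	acomp_request_set_src_dma(req, src_buf, src_len);
 *	acomp_request_set_dst_dma(req, dst_buf, dst_len);
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 */
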
/**
 * acomp_request_set_dst_nondma() -- Sets non-DMA destination virtual address
 *
 * Sets destination virtual address required by an acomp operation.
 * The address can not be used for DMA.
 *
 * @req:	asynchronous compress request
 * @dst:	virtual address pointer to output buffer
 * @dlen:	size of the output buffer
 */
static inline void acomp_request_set_dst_nondma(struct acomp_req *req,
						u8 *dst, unsigned int dlen)
{
	req->dvirt = dst;
	req->dlen = dlen;

	req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA;
	req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
}

/**
 * acomp_request_set_dst_folio() -- Sets destination folio
 *
 * Sets destination folio required by an acomp operation.
 *
 * @req:	asynchronous compress request
 * @folio:	pointer to output folio
 * @off:	output folio offset
 * @len:	size of the output buffer
 */
static inline void acomp_request_set_dst_folio(struct acomp_req *req,
						struct folio *folio, size_t off,
						unsigned int len)
{
	sg_init_table(&req->chain.dsg, 1);
	sg_set_folio(&req->chain.dsg, folio, len, off);
	acomp_request_set_dst_sg(req, &req->chain.dsg, len);
}

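/*
 * Example (sketch, assuming the caller already holds references on both
 * folios): (de)compress directly between folios, e.g. page-cache pages.
 *
 *	acomp_request_set_src_folio(req, src_folio, 0, src_len);
 *	acomp_request_set_dst_folio(req, dst_folio, 0, folio_size(dst_folio));
 *	err = crypto_wait_req(crypto_acomp_decompress(req), &wait);
 */
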
/**
 * crypto_acomp_compress() -- Invoke asynchronous compress operation
 *
 * Function invokes the asynchronous compress operation
 *
 * @req:	asynchronous compress request
 *
 * Return:	zero on success; error code in case of error
 */
int crypto_acomp_compress(struct acomp_req *req);

/**
 * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
 *
 * Function invokes the asynchronous decompress operation
 *
 * @req:	asynchronous compress request
 *
 * Return:	zero on success; error code in case of error
 */
int crypto_acomp_decompress(struct acomp_req *req);

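/*
 * Example (sketch): driving a request asynchronously without crypto_wait_req().
 * A return of 0 means the operation completed synchronously; -EINPROGRESS (or
 * -EBUSY when the request was accepted onto the backlog with
 * CRYPTO_TFM_REQ_MAY_BACKLOG) means completion is reported later through the
 * callback set by acomp_request_set_callback(); any other non-zero value is a
 * hard error.
 *
 *	err = crypto_acomp_compress(req);
 *	switch (err) {
 *	case 0:
 *	case -EINPROGRESS:
 *	case -EBUSY:
 *		break;
 *	default:
 *		pr_err("acomp compress failed: %d\n", err);
 *	}
 */
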
static inline struct acomp_req *acomp_request_on_stack_init(
	char *buf, struct crypto_acomp *tfm)
{
	struct acomp_req *req = (void *)buf;

	crypto_stack_request_init(&req->base, crypto_acomp_tfm(tfm));
	return req;
}

struct acomp_req *acomp_request_clone(struct acomp_req *req,
				      size_t total, gfp_t gfp);

#endif