1/*
2 * Scatterlist Cryptographic API.
3 *
4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
5 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
6 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
7 *
8 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
9 * and Nettle, by Niels Möller.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version.
15 *
16 */
17#ifndef _LINUX_CRYPTO_H
18#define _LINUX_CRYPTO_H
19
20#include <linux/atomic.h>
21#include <linux/kernel.h>
22#include <linux/list.h>
23#include <linux/bug.h>
24#include <linux/slab.h>
25#include <linux/string.h>
26#include <linux/uaccess.h>
27
28/*
29 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so those aliases are retained as well.
32 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
33 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
34 * expands twice on the same line. Instead, use a separate base name for the
35 * alias.
36 */
37#define MODULE_ALIAS_CRYPTO(name)	\
38		__MODULE_INFO(alias, alias_userspace, name);	\
39		__MODULE_INFO(alias, alias_crypto, "crypto-" name)
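
/*
 * For illustration only (hypothetical module, not part of this header): a
 * cipher module would typically declare an alias for each name it provides,
 * e.g.
 *
 *	MODULE_ALIAS_CRYPTO("aes");
 *	MODULE_ALIAS_CRYPTO("aes-generic");
 *
 * Each invocation records both the bare name and the "crypto-" prefixed form,
 * so both spellings remain usable for module loading.
 */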
40
41/*
42 * Algorithm masks and types.
43 */
44#define CRYPTO_ALG_TYPE_MASK		0x0000000f
45#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
46#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
47#define CRYPTO_ALG_TYPE_AEAD		0x00000003
48#define CRYPTO_ALG_TYPE_BLKCIPHER	0x00000004
49#define CRYPTO_ALG_TYPE_ABLKCIPHER	0x00000005
50#define CRYPTO_ALG_TYPE_GIVCIPHER	0x00000006
51#define CRYPTO_ALG_TYPE_DIGEST		0x00000008
52#define CRYPTO_ALG_TYPE_HASH		0x00000008
53#define CRYPTO_ALG_TYPE_SHASH		0x00000009
54#define CRYPTO_ALG_TYPE_AHASH		0x0000000a
55#define CRYPTO_ALG_TYPE_RNG		0x0000000c
56#define CRYPTO_ALG_TYPE_PCOMPRESS	0x0000000f
57
58#define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
59#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000c
60#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK	0x0000000c
61
62#define CRYPTO_ALG_LARVAL		0x00000010
63#define CRYPTO_ALG_DEAD			0x00000020
64#define CRYPTO_ALG_DYING		0x00000040
65#define CRYPTO_ALG_ASYNC		0x00000080
66
67/*
68 * Set this bit if and only if the algorithm requires another algorithm of
69 * the same type to handle corner cases.
70 */
71#define CRYPTO_ALG_NEED_FALLBACK	0x00000100
72
73/*
74 * This bit is set for symmetric key ciphers that have already been wrapped
75 * with a generic IV generator to prevent them from being wrapped again.
76 */
77#define CRYPTO_ALG_GENIV		0x00000200
78
79/*
80 * Set if the algorithm has passed automated run-time testing.  Note that
81 * if there is no run-time testing for a given algorithm it is considered
82 * to have passed.
83 */
84
85#define CRYPTO_ALG_TESTED		0x00000400
86
87/*
 * Set if the algorithm is an instance that is built from templates.
89 */
90#define CRYPTO_ALG_INSTANCE		0x00000800
91
/* Set this bit if the algorithm provided is hardware accelerated but not
 * directly available to userspace via an instruction set or similar mechanism.
 */
95#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000
96
97/*
98 * Mark a cipher as a service implementation only usable by another
99 * cipher and never by a normal user of the kernel crypto API
100 */
101#define CRYPTO_ALG_INTERNAL		0x00002000
102
103/*
104 * Transform masks and values (for crt_flags).
105 */
106#define CRYPTO_TFM_REQ_MASK		0x000fff00
107#define CRYPTO_TFM_RES_MASK		0xfff00000
108
109#define CRYPTO_TFM_REQ_WEAK_KEY		0x00000100
110#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
111#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400
112#define CRYPTO_TFM_RES_WEAK_KEY		0x00100000
113#define CRYPTO_TFM_RES_BAD_KEY_LEN   	0x00200000
114#define CRYPTO_TFM_RES_BAD_KEY_SCHED 	0x00400000
115#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 	0x00800000
116#define CRYPTO_TFM_RES_BAD_FLAGS 	0x01000000
117
118/*
119 * Miscellaneous stuff.
120 */
121#define CRYPTO_MAX_ALG_NAME		64
122
123/*
124 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
125 * declaration) is used to ensure that the crypto_tfm context structure is
126 * aligned correctly for the given architecture so that there are no alignment
127 * faults for C data types.  In particular, this is required on platforms such
128 * as arm where pointers are 32-bit aligned but there are data types such as
129 * u64 which require 64-bit alignment.
130 */
131#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
132
133#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
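
/*
 * A minimal sketch (hypothetical driver context, for illustration only) of
 * why this alignment matters: the per-transform context returned by
 * crypto_tfm_ctx() below is carried in a CRYPTO_MINALIGN_ATTR aligned array,
 * so 64-bit members may be accessed directly without alignment faults even on
 * 32-bit architectures:
 *
 *	struct example_cipher_ctx {
 *		u64 processed_bytes;
 *		u8 key[32];
 *	};
 *
 *	static int example_setkey(struct crypto_tfm *tfm, const u8 *key,
 *				  unsigned int keylen)
 *	{
 *		struct example_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 *
 *		if (keylen != sizeof(ctx->key))
 *			return -EINVAL;
 *		memcpy(ctx->key, key, keylen);
 *		ctx->processed_bytes = 0;
 *		return 0;
 *	}
 */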
134
135struct scatterlist;
136struct crypto_ablkcipher;
137struct crypto_async_request;
138struct crypto_aead;
139struct crypto_blkcipher;
140struct crypto_hash;
141struct crypto_rng;
142struct crypto_tfm;
143struct crypto_type;
144struct aead_givcrypt_request;
145struct skcipher_givcrypt_request;
146
147typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
148
149/**
150 * DOC: Block Cipher Context Data Structures
151 *
152 * These data structures define the operating context for each block cipher
153 * type.
154 */
155
156struct crypto_async_request {
157	struct list_head list;
158	crypto_completion_t complete;
159	void *data;
160	struct crypto_tfm *tfm;
161
162	u32 flags;
163};
164
165struct ablkcipher_request {
166	struct crypto_async_request base;
167
168	unsigned int nbytes;
169
170	void *info;
171
172	struct scatterlist *src;
173	struct scatterlist *dst;
174
175	void *__ctx[] CRYPTO_MINALIGN_ATTR;
176};
177
178/**
179 *	struct aead_request - AEAD request
180 *	@base: Common attributes for async crypto requests
181 *	@assoclen: Length in bytes of associated data for authentication
182 *	@cryptlen: Length of data to be encrypted or decrypted
183 *	@iv: Initialisation vector
184 *	@assoc: Associated data
185 *	@src: Source data
186 *	@dst: Destination data
187 *	@__ctx: Start of private context data
188 */
189struct aead_request {
190	struct crypto_async_request base;
191
192	unsigned int assoclen;
193	unsigned int cryptlen;
194
195	u8 *iv;
196
197	struct scatterlist *assoc;
198	struct scatterlist *src;
199	struct scatterlist *dst;
200
201	void *__ctx[] CRYPTO_MINALIGN_ATTR;
202};
203
204struct blkcipher_desc {
205	struct crypto_blkcipher *tfm;
206	void *info;
207	u32 flags;
208};
209
210struct cipher_desc {
211	struct crypto_tfm *tfm;
212	void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
213	unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
214			     const u8 *src, unsigned int nbytes);
215	void *info;
216};
217
218struct hash_desc {
219	struct crypto_hash *tfm;
220	u32 flags;
221};
222
223/**
224 * DOC: Block Cipher Algorithm Definitions
225 *
226 * These data structures define modular crypto algorithm implementations,
227 * managed via crypto_register_alg() and crypto_unregister_alg().
228 */
229
230/**
231 * struct ablkcipher_alg - asynchronous block cipher definition
232 * @min_keysize: Minimum key size supported by the transformation. This is the
233 *		 smallest key length supported by this transformation algorithm.
234 *		 This must be set to one of the pre-defined values as this is
235 *		 not hardware specific. Possible values for this field can be
236 *		 found via git grep "_MIN_KEY_SIZE" include/crypto/
237 * @max_keysize: Maximum key size supported by the transformation. This is the
238 *		 largest key length supported by this transformation algorithm.
239 *		 This must be set to one of the pre-defined values as this is
240 *		 not hardware specific. Possible values for this field can be
241 *		 found via git grep "_MAX_KEY_SIZE" include/crypto/
242 * @setkey: Set key for the transformation. This function is used to either
243 *	    program a supplied key into the hardware or store the key in the
244 *	    transformation context for programming it later. Note that this
245 *	    function does modify the transformation context. This function can
246 *	    be called multiple times during the existence of the transformation
247 *	    object, so one must make sure the key is properly reprogrammed into
248 *	    the hardware. This function is also responsible for checking the key
249 *	    length for validity. In case a software fallback was put in place in
250 *	    the @cra_init call, this function might need to use the fallback if
251 *	    the algorithm doesn't support all of the key sizes.
252 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
253 *	     the supplied scatterlist containing the blocks of data. The crypto
254 *	     API consumer is responsible for aligning the entries of the
255 *	     scatterlist properly and making sure the chunks are correctly
256 *	     sized. In case a software fallback was put in place in the
257 *	     @cra_init call, this function might need to use the fallback if
258 *	     the algorithm doesn't support all of the key sizes. In case the
259 *	     key was stored in transformation context, the key might need to be
260 *	     re-programmed into the hardware in this function. This function
261 *	     shall not modify the transformation context, as this function may
262 *	     be called in parallel with the same transformation object.
263 * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
264 *	     and the conditions are exactly the same.
 * @givencrypt: Update the IV for encryption. With this function, a cipher
 *	        implementation may provide its own routine for generating and
 *	        updating the IV for encryption.
 * @givdecrypt: Update the IV for decryption. This is the reverse of
 *	        @givencrypt.
270 * @geniv: The transformation implementation may use an "IV generator" provided
271 *	   by the kernel crypto API. Several use cases have a predefined
272 *	   approach how IVs are to be updated. For such use cases, the kernel
273 *	   crypto API provides ready-to-use implementations that can be
274 *	   referenced with this variable.
275 * @ivsize: IV size applicable for transformation. The consumer must provide an
276 *	    IV of exactly that size to perform the encrypt or decrypt operation.
277 *
 * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
279 * mandatory and must be filled.
280 */
281struct ablkcipher_alg {
282	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
283	              unsigned int keylen);
284	int (*encrypt)(struct ablkcipher_request *req);
285	int (*decrypt)(struct ablkcipher_request *req);
286	int (*givencrypt)(struct skcipher_givcrypt_request *req);
287	int (*givdecrypt)(struct skcipher_givcrypt_request *req);
288
289	const char *geniv;
290
291	unsigned int min_keysize;
292	unsigned int max_keysize;
293	unsigned int ivsize;
294};
295
296/**
297 * struct aead_alg - AEAD cipher definition
298 * @maxauthsize: Set the maximum authentication tag size supported by the
299 *		 transformation. A transformation may support smaller tag sizes.
300 *		 As the authentication tag is a message digest to ensure the
301 *		 integrity of the encrypted data, a consumer typically wants the
302 *		 largest authentication tag possible as defined by this
303 *		 variable.
304 * @setauthsize: Set authentication size for the AEAD transformation. This
305 *		 function is used to specify the consumer requested size of the
306 * 		 authentication tag to be either generated by the transformation
307 *		 during encryption or the size of the authentication tag to be
308 *		 supplied during the decryption operation. This function is also
309 *		 responsible for checking the authentication tag size for
310 *		 validity.
311 * @setkey: see struct ablkcipher_alg
312 * @encrypt: see struct ablkcipher_alg
313 * @decrypt: see struct ablkcipher_alg
314 * @givencrypt: see struct ablkcipher_alg
315 * @givdecrypt: see struct ablkcipher_alg
316 * @geniv: see struct ablkcipher_alg
317 * @ivsize: see struct ablkcipher_alg
318 *
 * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
320 * mandatory and must be filled.
321 */
322struct aead_alg {
323	int (*setkey)(struct crypto_aead *tfm, const u8 *key,
324	              unsigned int keylen);
325	int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
326	int (*encrypt)(struct aead_request *req);
327	int (*decrypt)(struct aead_request *req);
328	int (*givencrypt)(struct aead_givcrypt_request *req);
329	int (*givdecrypt)(struct aead_givcrypt_request *req);
330
331	const char *geniv;
332
333	unsigned int ivsize;
334	unsigned int maxauthsize;
335};
336
337/**
338 * struct blkcipher_alg - synchronous block cipher definition
339 * @min_keysize: see struct ablkcipher_alg
340 * @max_keysize: see struct ablkcipher_alg
341 * @setkey: see struct ablkcipher_alg
342 * @encrypt: see struct ablkcipher_alg
343 * @decrypt: see struct ablkcipher_alg
344 * @geniv: see struct ablkcipher_alg
345 * @ivsize: see struct ablkcipher_alg
346 *
347 * All fields except @geniv and @ivsize are mandatory and must be filled.
348 */
349struct blkcipher_alg {
350	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
351	              unsigned int keylen);
352	int (*encrypt)(struct blkcipher_desc *desc,
353		       struct scatterlist *dst, struct scatterlist *src,
354		       unsigned int nbytes);
355	int (*decrypt)(struct blkcipher_desc *desc,
356		       struct scatterlist *dst, struct scatterlist *src,
357		       unsigned int nbytes);
358
359	const char *geniv;
360
361	unsigned int min_keysize;
362	unsigned int max_keysize;
363	unsigned int ivsize;
364};
365
366/**
367 * struct cipher_alg - single-block symmetric ciphers definition
368 * @cia_min_keysize: Minimum key size supported by the transformation. This is
369 *		     the smallest key length supported by this transformation
370 *		     algorithm. This must be set to one of the pre-defined
371 *		     values as this is not hardware specific. Possible values
372 *		     for this field can be found via git grep "_MIN_KEY_SIZE"
373 *		     include/crypto/
374 * @cia_max_keysize: Maximum key size supported by the transformation. This is
375 *		    the largest key length supported by this transformation
376 *		    algorithm. This must be set to one of the pre-defined values
377 *		    as this is not hardware specific. Possible values for this
378 *		    field can be found via git grep "_MAX_KEY_SIZE"
379 *		    include/crypto/
380 * @cia_setkey: Set key for the transformation. This function is used to either
381 *	        program a supplied key into the hardware or store the key in the
382 *	        transformation context for programming it later. Note that this
383 *	        function does modify the transformation context. This function
384 *	        can be called multiple times during the existence of the
385 *	        transformation object, so one must make sure the key is properly
386 *	        reprogrammed into the hardware. This function is also
387 *	        responsible for checking the key length for validity.
388 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
389 *		 single block of data, which must be @cra_blocksize big. This
390 *		 always operates on a full @cra_blocksize and it is not possible
391 *		 to encrypt a block of smaller size. The supplied buffers must
392 *		 therefore also be at least of @cra_blocksize size. Both the
393 *		 input and output buffers are always aligned to @cra_alignmask.
394 *		 In case either of the input or output buffer supplied by user
395 *		 of the crypto API is not aligned to @cra_alignmask, the crypto
396 *		 API will re-align the buffers. The re-alignment means that a
397 *		 new buffer will be allocated, the data will be copied into the
398 *		 new buffer, then the processing will happen on the new buffer,
399 *		 then the data will be copied back into the original buffer and
400 *		 finally the new buffer will be freed. In case a software
401 *		 fallback was put in place in the @cra_init call, this function
402 *		 might need to use the fallback if the algorithm doesn't support
403 *		 all of the key sizes. In case the key was stored in
404 *		 transformation context, the key might need to be re-programmed
405 *		 into the hardware in this function. This function shall not
406 *		 modify the transformation context, as this function may be
407 *		 called in parallel with the same transformation object.
408 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
409 *		 @cia_encrypt, and the conditions are exactly the same.
410 *
411 * All fields are mandatory and must be filled.
412 */
413struct cipher_alg {
414	unsigned int cia_min_keysize;
415	unsigned int cia_max_keysize;
416	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
417	                  unsigned int keylen);
418	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
419	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
420};
421
422struct compress_alg {
423	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
424			    unsigned int slen, u8 *dst, unsigned int *dlen);
425	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
426			      unsigned int slen, u8 *dst, unsigned int *dlen);
427};
428
429/**
430 * struct rng_alg - random number generator definition
431 * @rng_make_random: The function defined by this variable obtains a random
432 *		     number. The random number generator transform must generate
433 *		     the random number out of the context provided with this
434 *		     call.
 * @rng_reset: Reset the random number generator by clearing the entire state.
 *	       With the invocation of this function call, the random number
 *	       generator shall completely reinitialize its state. If the random
 *	       number generator requires a seed for setting up a new state,
 *	       the seed must be provided by the consumer while invoking this
 *	       function. The required size of the seed is defined with
 *	       @seedsize.
 * @seedsize: The seed size required for random number generator
 *	      initialization, defined with this variable. Some random number
 *	      generators like the SP800-90A DRBG do not require a seed, as the
 *	      seeding is implemented internally without the need for support by
 *	      the consumer. In this case, the seed size is set to zero.
447 */
448struct rng_alg {
449	int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
450			       unsigned int dlen);
451	int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
452
453	unsigned int seedsize;
454};
455
456
457#define cra_ablkcipher	cra_u.ablkcipher
458#define cra_aead	cra_u.aead
459#define cra_blkcipher	cra_u.blkcipher
460#define cra_cipher	cra_u.cipher
461#define cra_compress	cra_u.compress
462#define cra_rng		cra_u.rng
463
464/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
466 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
467 *	       CRYPTO_ALG_* flags for the flags which go in here. Those are
468 *	       used for fine-tuning the description of the transformation
469 *	       algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *		   of the smallest possible unit which can be transformed with
 *		   this algorithm. The users must respect this value.
 *		   In case of a HASH transformation, a block smaller than
 *		   @cra_blocksize may be passed to the crypto API for
 *		   transformation; for any other transformation type, an error
 *		   will be returned upon any attempt to transform a chunk
 *		   smaller than @cra_blocksize.
478 * @cra_ctxsize: Size of the operational context of the transformation. This
479 *		 value informs the kernel crypto API about the memory size
480 *		 needed to be allocated for the transformation context.
481 * @cra_alignmask: Alignment mask for the input and output data buffer. The data
482 *		   buffer containing the input data for the algorithm must be
483 *		   aligned to this alignment mask. The data buffer for the
484 *		   output data must be aligned to this alignment mask. Note that
485 *		   the Crypto API will do the re-alignment in software, but
486 *		   only under special conditions and there is a performance hit.
487 *		   The re-alignment happens at these occasions for different
488 *		   @cra_u types: cipher -- For both input data and output data
489 *		   buffer; ahash -- For output hash destination buf; shash --
490 *		   For output hash destination buf.
491 *		   This is needed on hardware which is flawed by design and
492 *		   cannot pick data from arbitrary addresses.
493 * @cra_priority: Priority of this transformation implementation. In case
494 *		  multiple transformations with same @cra_name are available to
 *		  the Crypto API, the kernel will use the one with the highest
496 *		  @cra_priority.
497 * @cra_name: Generic name (usable by multiple implementations) of the
498 *	      transformation algorithm. This is the name of the transformation
499 *	      itself. This field is used by the kernel when looking up the
500 *	      providers of particular transformation.
501 * @cra_driver_name: Unique name of the transformation provider. This is the
502 *		     name of the provider of the transformation. This can be any
503 *		     arbitrary value, but in the usual case, this contains the
504 *		     name of the chip or provider and the name of the
505 *		     transformation algorithm.
506 * @cra_type: Type of the cryptographic transformation. This is a pointer to
507 *	      struct crypto_type, which implements callbacks common for all
 *	      transformation types. There are multiple options:
509 *	      &crypto_blkcipher_type, &crypto_ablkcipher_type,
510 *	      &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
511 *	      This field might be empty. In that case, there are no common
512 *	      callbacks. This is the case for: cipher, compress, shash.
513 * @cra_u: Callbacks implementing the transformation. This is a union of
514 *	   multiple structures. Depending on the type of transformation selected
515 *	   by @cra_type and @cra_flags above, the associated structure must be
516 *	   filled with callbacks. This field might be empty. This is the case
517 *	   for ahash, shash.
518 * @cra_init: Initialize the cryptographic transformation object. This function
519 *	      is used to initialize the cryptographic transformation object.
520 *	      This function is called only once at the instantiation time, right
521 *	      after the transformation context was allocated. In case the
522 *	      cryptographic hardware has some special requirements which need to
523 *	      be handled by software, this function shall check for the precise
524 *	      requirement of the transformation and put any software fallbacks
525 *	      in place.
526 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
527 *	      counterpart to @cra_init, used to remove various changes set in
528 *	      @cra_init.
529 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
530 * @cra_list: internally used
531 * @cra_users: internally used
532 * @cra_refcnt: internally used
533 * @cra_destroy: internally used
534 *
535 * The struct crypto_alg describes a generic Crypto API algorithm and is common
536 * for all of the transformations. Any variable not documented here shall not
537 * be used by a cipher implementation as it is internal to the Crypto API.
538 */
539struct crypto_alg {
540	struct list_head cra_list;
541	struct list_head cra_users;
542
543	u32 cra_flags;
544	unsigned int cra_blocksize;
545	unsigned int cra_ctxsize;
546	unsigned int cra_alignmask;
547
548	int cra_priority;
549	atomic_t cra_refcnt;
550
551	char cra_name[CRYPTO_MAX_ALG_NAME];
552	char cra_driver_name[CRYPTO_MAX_ALG_NAME];
553
554	const struct crypto_type *cra_type;
555
556	union {
557		struct ablkcipher_alg ablkcipher;
558		struct aead_alg aead;
559		struct blkcipher_alg blkcipher;
560		struct cipher_alg cipher;
561		struct compress_alg compress;
562		struct rng_alg rng;
563	} cra_u;
564
565	int (*cra_init)(struct crypto_tfm *tfm);
566	void (*cra_exit)(struct crypto_tfm *tfm);
567	void (*cra_destroy)(struct crypto_alg *alg);
568
569	struct module *cra_module;
570};
571
572/*
573 * Algorithm registration interface.
574 */
575int crypto_register_alg(struct crypto_alg *alg);
576int crypto_unregister_alg(struct crypto_alg *alg);
577int crypto_register_algs(struct crypto_alg *algs, int count);
578int crypto_unregister_algs(struct crypto_alg *algs, int count);
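
/*
 * A registration sketch (hypothetical "example" single-block cipher; the
 * EXAMPLE_* constants and example_* callbacks are assumptions for
 * illustration only): the implementation fills in the cipher member of
 * @cra_u and registers the algorithm from its module init code, typically
 * hooked up via module_init()/module_exit().
 *
 *	static struct crypto_alg example_alg = {
 *		.cra_name		= "example",
 *		.cra_driver_name	= "example-generic",
 *		.cra_priority		= 100,
 *		.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize		= EXAMPLE_BLOCK_SIZE,
 *		.cra_ctxsize		= sizeof(struct example_cipher_ctx),
 *		.cra_module		= THIS_MODULE,
 *		.cra_u			= {
 *			.cipher = {
 *				.cia_min_keysize	= EXAMPLE_KEY_SIZE,
 *				.cia_max_keysize	= EXAMPLE_KEY_SIZE,
 *				.cia_setkey		= example_setkey,
 *				.cia_encrypt		= example_encrypt,
 *				.cia_decrypt		= example_decrypt,
 *			},
 *		},
 *	};
 *
 *	static int __init example_mod_init(void)
 *	{
 *		return crypto_register_alg(&example_alg);
 *	}
 *
 *	static void __exit example_mod_exit(void)
 *	{
 *		crypto_unregister_alg(&example_alg);
 *	}
 */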
579
580/*
581 * Algorithm query interface.
582 */
583int crypto_has_alg(const char *name, u32 type, u32 mask);
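
/*
 * For example (hypothetical check, algorithm name chosen for illustration),
 * a caller may probe whether any provider of a given algorithm can be found
 * before relying on it:
 *
 *	if (!crypto_has_alg("gcm(aes)", 0, 0))
 *		return -ENOENT;
 */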
584
585/*
586 * Transforms: user-instantiated objects which encapsulate algorithms
587 * and core processing logic.  Managed via crypto_alloc_*() and
588 * crypto_free_*(), as well as the various helpers below.
589 */
590
591struct ablkcipher_tfm {
592	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
593	              unsigned int keylen);
594	int (*encrypt)(struct ablkcipher_request *req);
595	int (*decrypt)(struct ablkcipher_request *req);
596	int (*givencrypt)(struct skcipher_givcrypt_request *req);
597	int (*givdecrypt)(struct skcipher_givcrypt_request *req);
598
599	struct crypto_ablkcipher *base;
600
601	unsigned int ivsize;
602	unsigned int reqsize;
603};
604
605struct aead_tfm {
606	int (*setkey)(struct crypto_aead *tfm, const u8 *key,
607	              unsigned int keylen);
608	int (*encrypt)(struct aead_request *req);
609	int (*decrypt)(struct aead_request *req);
610	int (*givencrypt)(struct aead_givcrypt_request *req);
611	int (*givdecrypt)(struct aead_givcrypt_request *req);
612
613	struct crypto_aead *base;
614
615	unsigned int ivsize;
616	unsigned int authsize;
617	unsigned int reqsize;
618};
619
620struct blkcipher_tfm {
621	void *iv;
622	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
623		      unsigned int keylen);
624	int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
625		       struct scatterlist *src, unsigned int nbytes);
626	int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
627		       struct scatterlist *src, unsigned int nbytes);
628};
629
630struct cipher_tfm {
631	int (*cit_setkey)(struct crypto_tfm *tfm,
632	                  const u8 *key, unsigned int keylen);
633	void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
634	void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
635};
636
637struct hash_tfm {
638	int (*init)(struct hash_desc *desc);
639	int (*update)(struct hash_desc *desc,
640		      struct scatterlist *sg, unsigned int nsg);
641	int (*final)(struct hash_desc *desc, u8 *out);
642	int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
643		      unsigned int nsg, u8 *out);
644	int (*setkey)(struct crypto_hash *tfm, const u8 *key,
645		      unsigned int keylen);
646	unsigned int digestsize;
647};
648
649struct compress_tfm {
650	int (*cot_compress)(struct crypto_tfm *tfm,
651	                    const u8 *src, unsigned int slen,
652	                    u8 *dst, unsigned int *dlen);
653	int (*cot_decompress)(struct crypto_tfm *tfm,
654	                      const u8 *src, unsigned int slen,
655	                      u8 *dst, unsigned int *dlen);
656};
657
658struct rng_tfm {
659	int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
660			      unsigned int dlen);
661	int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
662};
663
664#define crt_ablkcipher	crt_u.ablkcipher
665#define crt_aead	crt_u.aead
666#define crt_blkcipher	crt_u.blkcipher
667#define crt_cipher	crt_u.cipher
668#define crt_hash	crt_u.hash
669#define crt_compress	crt_u.compress
670#define crt_rng		crt_u.rng
671
672struct crypto_tfm {
673
674	u32 crt_flags;
675
676	union {
677		struct ablkcipher_tfm ablkcipher;
678		struct aead_tfm aead;
679		struct blkcipher_tfm blkcipher;
680		struct cipher_tfm cipher;
681		struct hash_tfm hash;
682		struct compress_tfm compress;
683		struct rng_tfm rng;
684	} crt_u;
685
686	void (*exit)(struct crypto_tfm *tfm);
687
688	struct crypto_alg *__crt_alg;
689
690	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
691};
692
693struct crypto_ablkcipher {
694	struct crypto_tfm base;
695};
696
697struct crypto_aead {
698	struct crypto_tfm base;
699};
700
701struct crypto_blkcipher {
702	struct crypto_tfm base;
703};
704
705struct crypto_cipher {
706	struct crypto_tfm base;
707};
708
709struct crypto_comp {
710	struct crypto_tfm base;
711};
712
713struct crypto_hash {
714	struct crypto_tfm base;
715};
716
717struct crypto_rng {
718	struct crypto_tfm base;
719};
720
721enum {
722	CRYPTOA_UNSPEC,
723	CRYPTOA_ALG,
724	CRYPTOA_TYPE,
725	CRYPTOA_U32,
726	__CRYPTOA_MAX,
727};
728
729#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)
730
731/* Maximum number of (rtattr) parameters for each template. */
732#define CRYPTO_MAX_ATTRS 32
733
734struct crypto_attr_alg {
735	char name[CRYPTO_MAX_ALG_NAME];
736};
737
738struct crypto_attr_type {
739	u32 type;
740	u32 mask;
741};
742
743struct crypto_attr_u32 {
744	u32 num;
745};
746
747/*
748 * Transform user interface.
749 */
750
751struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
752void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
753
754static inline void crypto_free_tfm(struct crypto_tfm *tfm)
755{
756	return crypto_destroy_tfm(tfm, tfm);
757}
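
/*
 * A minimal allocation sketch (hypothetical caller; a type and mask of zero
 * place no restriction on the algorithm type): most users go through the
 * type-specific wrappers below, but the base interface follows the same
 * IS_ERR()/PTR_ERR() pattern:
 *
 *	struct crypto_tfm *tfm = crypto_alloc_base("aes", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_tfm(tfm);
 */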
758
759int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
760
761/*
762 * Transform helpers which query the underlying algorithm.
763 */
764static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
765{
766	return tfm->__crt_alg->cra_name;
767}
768
769static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
770{
771	return tfm->__crt_alg->cra_driver_name;
772}
773
774static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
775{
776	return tfm->__crt_alg->cra_priority;
777}
778
779static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
780{
781	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
782}
783
784static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
785{
786	return tfm->__crt_alg->cra_blocksize;
787}
788
789static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
790{
791	return tfm->__crt_alg->cra_alignmask;
792}
793
794static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
795{
796	return tfm->crt_flags;
797}
798
799static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
800{
801	tfm->crt_flags |= flags;
802}
803
804static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
805{
806	tfm->crt_flags &= ~flags;
807}
808
809static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
810{
811	return tfm->__crt_ctx;
812}
813
814static inline unsigned int crypto_tfm_ctx_alignment(void)
815{
816	struct crypto_tfm *tfm;
817	return __alignof__(tfm->__crt_ctx);
818}
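
/*
 * Illustrative use of the helpers above (hypothetical debugging snippet):
 *
 *	pr_info("using %s (%s), priority %d, blocksize %u, alignmask 0x%x\n",
 *		crypto_tfm_alg_name(tfm), crypto_tfm_alg_driver_name(tfm),
 *		crypto_tfm_alg_priority(tfm), crypto_tfm_alg_blocksize(tfm),
 *		crypto_tfm_alg_alignmask(tfm));
 */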
819
820/*
821 * API wrappers.
822 */
823static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
824	struct crypto_tfm *tfm)
825{
826	return (struct crypto_ablkcipher *)tfm;
827}
828
829static inline u32 crypto_skcipher_type(u32 type)
830{
831	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
832	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
833	return type;
834}
835
836static inline u32 crypto_skcipher_mask(u32 mask)
837{
838	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
839	mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
840	return mask;
841}
842
843/**
844 * DOC: Asynchronous Block Cipher API
845 *
846 * Asynchronous block cipher API is used with the ciphers of type
847 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
848 *
849 * Asynchronous cipher operations imply that the function invocation for a
850 * cipher request returns immediately before the completion of the operation.
851 * The cipher request is scheduled as a separate kernel thread and therefore
852 * load-balanced on the different CPUs via the process scheduler. To allow
853 * the kernel crypto API to inform the caller about the completion of a cipher
854 * request, the caller must provide a callback function. That function is
855 * invoked with the cipher handle when the request completes.
856 *
 * To support the asynchronous operation, more information than just the
 * cipher handle must be supplied to the kernel crypto API. That additional
 * information is given by filling in the ablkcipher_request data structure.
860 *
 * For the asynchronous block cipher API, the state is maintained with the tfm
 * cipher handle. A single tfm can be used across multiple calls and in
 * parallel. For asynchronous block cipher calls, context data supplied and
 * only used by the caller can be referenced in the request data structure in
 * addition to the IV used for the cipher request. The maintenance of such
 * state information would be important for a crypto driver implementer to
 * have, because when calling the callback function upon completion of the
 * cipher operation, that callback function may need some information about
 * which operation just finished if it invoked multiple operations in
 * parallel. This state information is unused by the kernel crypto API.
 * A usage sketch is given after ablkcipher_request_set_crypt() below.
 */
872
873/**
874 * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle
875 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
876 *	      ablkcipher cipher
877 * @type: specifies the type of the cipher
878 * @mask: specifies the mask for the cipher
879 *
880 * Allocate a cipher handle for an ablkcipher. The returned struct
881 * crypto_ablkcipher is the cipher handle that is required for any subsequent
882 * API invocation for that ablkcipher.
883 *
884 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
885 *	   of an error, PTR_ERR() returns the error code.
886 */
887struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
888						  u32 type, u32 mask);
889
890static inline struct crypto_tfm *crypto_ablkcipher_tfm(
891	struct crypto_ablkcipher *tfm)
892{
893	return &tfm->base;
894}
895
896/**
897 * crypto_free_ablkcipher() - zeroize and free cipher handle
898 * @tfm: cipher handle to be freed
899 */
900static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
901{
902	crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
903}
904
905/**
906 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
907 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
908 *	      ablkcipher
909 * @type: specifies the type of the cipher
910 * @mask: specifies the mask for the cipher
911 *
912 * Return: true when the ablkcipher is known to the kernel crypto API; false
913 *	   otherwise
914 */
915static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
916					u32 mask)
917{
918	return crypto_has_alg(alg_name, crypto_skcipher_type(type),
919			      crypto_skcipher_mask(mask));
920}
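
/*
 * For example (hypothetical availability check before allocation):
 *
 *	if (!crypto_has_ablkcipher("ctr(aes)", 0, 0))
 *		return -ENOENT;
 */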
921
922static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
923	struct crypto_ablkcipher *tfm)
924{
925	return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
926}
927
928/**
929 * crypto_ablkcipher_ivsize() - obtain IV size
930 * @tfm: cipher handle
931 *
932 * The size of the IV for the ablkcipher referenced by the cipher handle is
933 * returned. This IV size may be zero if the cipher does not need an IV.
934 *
935 * Return: IV size in bytes
936 */
937static inline unsigned int crypto_ablkcipher_ivsize(
938	struct crypto_ablkcipher *tfm)
939{
940	return crypto_ablkcipher_crt(tfm)->ivsize;
941}
942
943/**
944 * crypto_ablkcipher_blocksize() - obtain block size of cipher
945 * @tfm: cipher handle
946 *
947 * The block size for the ablkcipher referenced with the cipher handle is
948 * returned. The caller may use that information to allocate appropriate
949 * memory for the data returned by the encryption or decryption operation
950 *
951 * Return: block size of cipher
952 */
953static inline unsigned int crypto_ablkcipher_blocksize(
954	struct crypto_ablkcipher *tfm)
955{
956	return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
957}
958
959static inline unsigned int crypto_ablkcipher_alignmask(
960	struct crypto_ablkcipher *tfm)
961{
962	return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
963}
964
965static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
966{
967	return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
968}
969
970static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
971					       u32 flags)
972{
973	crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
974}
975
976static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
977						 u32 flags)
978{
979	crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
980}
981
982/**
983 * crypto_ablkcipher_setkey() - set key for cipher
984 * @tfm: cipher handle
985 * @key: buffer holding the key
986 * @keylen: length of the key in bytes
987 *
988 * The caller provided key is set for the ablkcipher referenced by the cipher
989 * handle.
990 *
991 * Note, the key length determines the cipher type. Many block ciphers implement
992 * different cipher modes depending on the key size, such as AES-128 vs AES-192
993 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
994 * is performed.
995 *
996 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
997 */
998static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
999					   const u8 *key, unsigned int keylen)
1000{
1001	struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);
1002
1003	return crt->setkey(crt->base, key, keylen);
1004}
1005
1006/**
1007 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
1008 * @req: ablkcipher_request out of which the cipher handle is to be obtained
1009 *
1010 * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
1011 * data structure.
1012 *
1013 * Return: crypto_ablkcipher handle
1014 */
1015static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
1016	struct ablkcipher_request *req)
1017{
1018	return __crypto_ablkcipher_cast(req->base.tfm);
1019}
1020
1021/**
1022 * crypto_ablkcipher_encrypt() - encrypt plaintext
1023 * @req: reference to the ablkcipher_request handle that holds all information
1024 *	 needed to perform the cipher operation
1025 *
1026 * Encrypt plaintext data using the ablkcipher_request handle. That data
1027 * structure and how it is filled with data is discussed with the
1028 * ablkcipher_request_* functions.
1029 *
1030 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1031 */
1032static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
1033{
1034	struct ablkcipher_tfm *crt =
1035		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
1036	return crt->encrypt(req);
1037}
1038
1039/**
1040 * crypto_ablkcipher_decrypt() - decrypt ciphertext
1041 * @req: reference to the ablkcipher_request handle that holds all information
1042 *	 needed to perform the cipher operation
1043 *
1044 * Decrypt ciphertext data using the ablkcipher_request handle. That data
1045 * structure and how it is filled with data is discussed with the
1046 * ablkcipher_request_* functions.
1047 *
1048 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1049 */
1050static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
1051{
1052	struct ablkcipher_tfm *crt =
1053		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
1054	return crt->decrypt(req);
1055}
1056
1057/**
1058 * DOC: Asynchronous Cipher Request Handle
1059 *
1060 * The ablkcipher_request data structure contains all pointers to data
1061 * required for the asynchronous cipher operation. This includes the cipher
1062 * handle (which can be used by multiple ablkcipher_request instances), pointer
1063 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
 * as a handle to the ablkcipher_request_* API calls in a similar way as the
 * ablkcipher handle to the crypto_ablkcipher_* API calls.
1066 */
1067
1068/**
1069 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
1070 * @tfm: cipher handle
1071 *
1072 * Return: number of bytes
1073 */
1074static inline unsigned int crypto_ablkcipher_reqsize(
1075	struct crypto_ablkcipher *tfm)
1076{
1077	return crypto_ablkcipher_crt(tfm)->reqsize;
1078}
1079
1080/**
1081 * ablkcipher_request_set_tfm() - update cipher handle reference in request
1082 * @req: request handle to be modified
1083 * @tfm: cipher handle that shall be added to the request handle
1084 *
1085 * Allow the caller to replace the existing ablkcipher handle in the request
1086 * data structure with a different one.
1087 */
1088static inline void ablkcipher_request_set_tfm(
1089	struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
1090{
1091	req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
1092}
1093
1094static inline struct ablkcipher_request *ablkcipher_request_cast(
1095	struct crypto_async_request *req)
1096{
1097	return container_of(req, struct ablkcipher_request, base);
1098}
1099
1100/**
1101 * ablkcipher_request_alloc() - allocate request data structure
1102 * @tfm: cipher handle to be registered with the request
1103 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
1104 *
1105 * Allocate the request data structure that must be used with the ablkcipher
1106 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
1107 * handle is registered in the request data structure.
1108 *
 * Return: allocated request handle in case of success, or NULL if out of
 *	   memory
1111 */
1112static inline struct ablkcipher_request *ablkcipher_request_alloc(
1113	struct crypto_ablkcipher *tfm, gfp_t gfp)
1114{
1115	struct ablkcipher_request *req;
1116
1117	req = kmalloc(sizeof(struct ablkcipher_request) +
1118		      crypto_ablkcipher_reqsize(tfm), gfp);
1119
1120	if (likely(req))
1121		ablkcipher_request_set_tfm(req, tfm);
1122
1123	return req;
1124}
1125
1126/**
1127 * ablkcipher_request_free() - zeroize and free request data structure
1128 * @req: request data structure cipher handle to be freed
1129 */
1130static inline void ablkcipher_request_free(struct ablkcipher_request *req)
1131{
1132	kzfree(req);
1133}
1134
1135/**
1136 * ablkcipher_request_set_callback() - set asynchronous callback function
1137 * @req: request handle
1138 * @flags: specify zero or an ORing of the flags
1139 *         CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
1140 *	   increase the wait queue beyond the initial maximum size;
1141 *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
1142 * @compl: callback function pointer to be registered with the request handle
1143 * @data: The data pointer refers to memory that is not used by the kernel
1144 *	  crypto API, but provided to the callback function for it to use. Here,
1145 *	  the caller can provide a reference to memory the callback function can
1146 *	  operate on. As the callback function is invoked asynchronously to the
1147 *	  related functionality, it may need to access data structures of the
1148 *	  related functionality which can be referenced using this pointer. The
1149 *	  callback function can access the memory via the "data" field in the
1150 *	  crypto_async_request data structure provided to the callback function.
1151 *
1152 * This function allows setting the callback function that is triggered once the
1153 * cipher operation completes.
1154 *
1155 * The callback function is registered with the ablkcipher_request handle and
1156 * must comply with the following template
1157 *
1158 *	void callback_function(struct crypto_async_request *req, int error)
1159 */
1160static inline void ablkcipher_request_set_callback(
1161	struct ablkcipher_request *req,
1162	u32 flags, crypto_completion_t compl, void *data)
1163{
1164	req->base.complete = compl;
1165	req->base.data = data;
1166	req->base.flags = flags;
1167}
1168
1169/**
1170 * ablkcipher_request_set_crypt() - set data buffers
1171 * @req: request handle
1172 * @src: source scatter / gather list
1173 * @dst: destination scatter / gather list
1174 * @nbytes: number of bytes to process from @src
1175 * @iv: IV for the cipher operation which must comply with the IV size defined
1176 *      by crypto_ablkcipher_ivsize
1177 *
1178 * This function allows setting of the source data and destination data
1179 * scatter / gather lists.
1180 *
1181 * For encryption, the source is treated as the plaintext and the
1182 * destination is the ciphertext. For a decryption operation, the use is
1183 * reversed - the source is the ciphertext and the destination is the plaintext.
1184 */
1185static inline void ablkcipher_request_set_crypt(
1186	struct ablkcipher_request *req,
1187	struct scatterlist *src, struct scatterlist *dst,
1188	unsigned int nbytes, void *iv)
1189{
1190	req->src = src;
1191	req->dst = dst;
1192	req->nbytes = nbytes;
1193	req->info = iv;
1194}
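
/*
 * The following sketch ties the calls documented above together
 * (hypothetical caller; "cbc(aes)", the 16-byte key length and the
 * completion-based wait are assumptions for illustration, and it requires
 * <linux/completion.h> and <linux/scatterlist.h>):
 *
 *	struct example_result {
 *		struct completion completion;
 *		int err;
 *	};
 *
 *	static void example_complete(struct crypto_async_request *req, int err)
 *	{
 *		struct example_result *res = req->data;
 *
 *		if (err == -EINPROGRESS)
 *			return;
 *		res->err = err;
 *		complete(&res->completion);
 *	}
 *
 *	static int example_ablk_encrypt(struct scatterlist *src,
 *					struct scatterlist *dst,
 *					unsigned int nbytes,
 *					const u8 *key, u8 *iv)
 *	{
 *		struct crypto_ablkcipher *tfm;
 *		struct ablkcipher_request *req;
 *		struct example_result res;
 *		int err;
 *
 *		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		err = crypto_ablkcipher_setkey(tfm, key, 16);
 *		if (err)
 *			goto out_free_tfm;
 *
 *		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *		if (!req) {
 *			err = -ENOMEM;
 *			goto out_free_tfm;
 *		}
 *
 *		init_completion(&res.completion);
 *		ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *						example_complete, &res);
 *		ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *
 *		err = crypto_ablkcipher_encrypt(req);
 *		if (err == -EINPROGRESS || err == -EBUSY) {
 *			wait_for_completion(&res.completion);
 *			err = res.err;
 *		}
 *
 *		ablkcipher_request_free(req);
 *	out_free_tfm:
 *		crypto_free_ablkcipher(tfm);
 *		return err;
 *	}
 */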
1195
1196/**
1197 * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
1198 *
1199 * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
1200 * (listed as type "aead" in /proc/crypto)
1201 *
 * The most prominent examples for this type of encryption are GCM and CCM.
1203 * However, the kernel supports other types of AEAD ciphers which are defined
1204 * with the following cipher string:
1205 *
1206 *	authenc(keyed message digest, block cipher)
1207 *
1208 * For example: authenc(hmac(sha256), cbc(aes))
1209 *
1210 * The example code provided for the asynchronous block cipher operation
 * applies here as well. Naturally all *ablkcipher* symbols must be exchanged
 * for the *aead* pendants discussed in the following. In addition, for the AEAD
1213 * operation, the aead_request_set_assoc function must be used to set the
1214 * pointer to the associated data memory location before performing the
1215 * encryption or decryption operation. In case of an encryption, the associated
1216 * data memory is filled during the encryption operation. For decryption, the
1217 * associated data memory must contain data that is used to verify the integrity
1218 * of the decrypted data. Another deviation from the asynchronous block cipher
 * operation is that the caller should explicitly check for -EBADMSG returned
 * by crypto_aead_decrypt. That error indicates an authentication error, i.e.
1221 * a breach in the integrity of the message. In essence, that -EBADMSG error
1222 * code is the key bonus an AEAD cipher has over "standard" block chaining
1223 * modes.
1224 */
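
/*
 * A condensed AEAD sketch (hypothetical "gcm(aes)" transform; buffer names
 * and sizes are chosen for illustration, error handling is abbreviated, and
 * allocation checks, callback registration and completion handling follow
 * the asynchronous block cipher example given after
 * ablkcipher_request_set_crypt() above):
 *
 *	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, authsize);
 *
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  example_complete, &res);
 *
 *	sg_init_one(&sg, buf, ptlen + authsize);
 *	sg_init_one(&asg, assoc, assoclen);
 *	aead_request_set_assoc(req, &asg, assoclen);
 *	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
 *
 *	err = crypto_aead_encrypt(req);
 *
 * The destination buffer must provide room for the authentication tag in
 * addition to the plaintext length, and a decryption call that returns
 * -EBADMSG indicates that the integrity check failed.
 */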
1225
1226static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
1227{
1228	return (struct crypto_aead *)tfm;
1229}
1230
1231/**
1232 * crypto_alloc_aead() - allocate AEAD cipher handle
1233 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1234 *	     AEAD cipher
1235 * @type: specifies the type of the cipher
1236 * @mask: specifies the mask for the cipher
1237 *
1238 * Allocate a cipher handle for an AEAD. The returned struct
1239 * crypto_aead is the cipher handle that is required for any subsequent
1240 * API invocation for that AEAD.
1241 *
1242 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1243 *	   of an error, PTR_ERR() returns the error code.
1244 */
1245struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
1246
1247static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
1248{
1249	return &tfm->base;
1250}
1251
1252/**
1253 * crypto_free_aead() - zeroize and free aead handle
1254 * @tfm: cipher handle to be freed
1255 */
1256static inline void crypto_free_aead(struct crypto_aead *tfm)
1257{
1258	crypto_free_tfm(crypto_aead_tfm(tfm));
1259}
1260
1261static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
1262{
1263	return &crypto_aead_tfm(tfm)->crt_aead;
1264}
1265
1266/**
1267 * crypto_aead_ivsize() - obtain IV size
1268 * @tfm: cipher handle
1269 *
1270 * The size of the IV for the aead referenced by the cipher handle is
1271 * returned. This IV size may be zero if the cipher does not need an IV.
1272 *
1273 * Return: IV size in bytes
1274 */
1275static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
1276{
1277	return crypto_aead_crt(tfm)->ivsize;
1278}
1279
1280/**
1281 * crypto_aead_authsize() - obtain maximum authentication data size
1282 * @tfm: cipher handle
1283 *
1284 * The maximum size of the authentication data for the AEAD cipher referenced
1285 * by the AEAD cipher handle is returned. The authentication data size may be
1286 * zero if the cipher implements a hard-coded maximum.
1287 *
1288 * The authentication data may also be known as "tag value".
1289 *
1290 * Return: authentication data size / tag size in bytes
1291 */
1292static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
1293{
1294	return crypto_aead_crt(tfm)->authsize;
1295}
1296
1297/**
1298 * crypto_aead_blocksize() - obtain block size of cipher
1299 * @tfm: cipher handle
1300 *
1301 * The block size for the AEAD referenced with the cipher handle is returned.
1302 * The caller may use that information to allocate appropriate memory for the
1303 * data returned by the encryption or decryption operation
1304 *
1305 * Return: block size of cipher
1306 */
1307static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
1308{
1309	return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
1310}
1311
1312static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
1313{
1314	return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
1315}
1316
1317static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm)
1318{
1319	return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
1320}
1321
1322static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
1323{
1324	crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
1325}
1326
1327static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
1328{
1329	crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
1330}
1331
1332/**
1333 * crypto_aead_setkey() - set key for cipher
1334 * @tfm: cipher handle
1335 * @key: buffer holding the key
1336 * @keylen: length of the key in bytes
1337 *
1338 * The caller provided key is set for the AEAD referenced by the cipher
1339 * handle.
1340 *
1341 * Note, the key length determines the cipher type. Many block ciphers implement
1342 * different cipher modes depending on the key size, such as AES-128 vs AES-192
1343 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
1344 * is performed.
1345 *
1346 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1347 */
1348static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1349				     unsigned int keylen)
1350{
1351	struct aead_tfm *crt = crypto_aead_crt(tfm);
1352
1353	return crt->setkey(crt->base, key, keylen);
1354}
1355
1356/**
1357 * crypto_aead_setauthsize() - set authentication data size
1358 * @tfm: cipher handle
1359 * @authsize: size of the authentication data / tag in bytes
1360 *
1361 * Set the authentication data size / tag size. AEAD requires an authentication
1362 * tag (or MAC) in addition to the associated data.
1363 *
1364 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1365 */
1366int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
1367
1368static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
1369{
1370	return __crypto_aead_cast(req->base.tfm);
1371}
1372
1373/**
1374 * crypto_aead_encrypt() - encrypt plaintext
1375 * @req: reference to the aead_request handle that holds all information
1376 *	 needed to perform the cipher operation
1377 *
1378 * Encrypt plaintext data using the aead_request handle. That data structure
1379 * and how it is filled with data is discussed with the aead_request_*
1380 * functions.
1381 *
1382 * IMPORTANT NOTE The encryption operation creates the authentication data /
1383 *		  tag. That data is concatenated with the created ciphertext.
1384 *		  The ciphertext memory size is therefore the given number of
1385 *		  block cipher blocks + the size defined by the
1386 *		  crypto_aead_setauthsize invocation. The caller must ensure
1387 *		  that sufficient memory is available for the ciphertext and
1388 *		  the authentication tag.
1389 *
1390 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1391 */
1392static inline int crypto_aead_encrypt(struct aead_request *req)
1393{
1394	return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
1395}
1396
1397/**
1398 * crypto_aead_decrypt() - decrypt ciphertext
1399 * @req: reference to the ablkcipher_request handle that holds all information
1400 *	 needed to perform the cipher operation
1401 *
1402 * Decrypt ciphertext data using the aead_request handle. That data structure
1403 * and how it is filled with data is discussed with the aead_request_*
1404 * functions.
1405 *
1406 * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
1407 *		  authentication data / tag. That authentication data / tag
1408 *		  must have the size defined by the crypto_aead_setauthsize
1409 *		  invocation.
 *
1412 * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
1413 *	   cipher operation performs the authentication of the data during the
1414 *	   decryption operation. Therefore, the function returns this error if
1415 *	   the authentication of the ciphertext was unsuccessful (i.e. the
1416 *	   integrity of the ciphertext or the associated data was violated);
1417 *	   < 0 if an error occurred.
1418 */
1419static inline int crypto_aead_decrypt(struct aead_request *req)
1420{
1421	if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
1422		return -EINVAL;
1423
1424	return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
1425}
1426
1427/**
1428 * DOC: Asynchronous AEAD Request Handle
1429 *
1430 * The aead_request data structure contains all pointers to data required for
1431 * the AEAD cipher operation. This includes the cipher handle (which can be
1432 * used by multiple aead_request instances), pointer to plaintext and
1433 * ciphertext, asynchronous callback function, etc. It acts as a handle to the
 * aead_request_* API calls in a similar way as the AEAD handle to the
1435 * crypto_aead_* API calls.
1436 */
1437
1438/**
1439 * crypto_aead_reqsize() - obtain size of the request data structure
1440 * @tfm: cipher handle
1441 *
1442 * Return: number of bytes
1443 */
1444static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
1445{
1446	return crypto_aead_crt(tfm)->reqsize;
1447}
1448
1449/**
1450 * aead_request_set_tfm() - update cipher handle reference in request
1451 * @req: request handle to be modified
1452 * @tfm: cipher handle that shall be added to the request handle
1453 *
1454 * Allow the caller to replace the existing aead handle in the request
1455 * data structure with a different one.
1456 */
1457static inline void aead_request_set_tfm(struct aead_request *req,
1458					struct crypto_aead *tfm)
1459{
1460	req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
1461}
1462
1463/**
1464 * aead_request_alloc() - allocate request data structure
1465 * @tfm: cipher handle to be registered with the request
1466 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
1467 *
1468 * Allocate the request data structure that must be used with the AEAD
1469 * encrypt and decrypt API calls. During the allocation, the provided aead
1470 * handle is registered in the request data structure.
1471 *
 * Return: allocated request handle in case of success, or NULL if out of
 *	   memory
1474 */
1475static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
1476						      gfp_t gfp)
1477{
1478	struct aead_request *req;
1479
1480	req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
1481
1482	if (likely(req))
1483		aead_request_set_tfm(req, tfm);
1484
1485	return req;
1486}
1487
1488/**
1489 * aead_request_free() - zeroize and free request data structure
1490 * @req: request data structure cipher handle to be freed
1491 */
1492static inline void aead_request_free(struct aead_request *req)
1493{
1494	kzfree(req);
1495}
1496
1497/**
1498 * aead_request_set_callback() - set asynchronous callback function
1499 * @req: request handle
1500 * @flags: specify zero or an ORing of the flags
1501 *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
1502 *	   increase the wait queue beyond the initial maximum size;
1503 *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
1504 * @compl: callback function pointer to be registered with the request handle
1505 * @data: The data pointer refers to memory that is not used by the kernel
1506 *	  crypto API, but provided to the callback function for it to use. Here,
1507 *	  the caller can provide a reference to memory the callback function can
1508 *	  operate on. As the callback function is invoked asynchronously to the
1509 *	  related functionality, it may need to access data structures of the
1510 *	  related functionality which can be referenced using this pointer. The
1511 *	  callback function can access the memory via the "data" field in the
1512 *	  crypto_async_request data structure provided to the callback function.
1513 *
1514 * This function sets the callback function that is triggered once the cipher
1515 * operation completes.
1516 *
1517 * The callback function is registered with the aead_request handle and
1518 * must comply with the following template
1519 *
1520 *	void callback_function(struct crypto_async_request *req, int error)
1521 */
1522static inline void aead_request_set_callback(struct aead_request *req,
1523					     u32 flags,
1524					     crypto_completion_t compl,
1525					     void *data)
1526{
1527	req->base.complete = compl;
1528	req->base.data = data;
1529	req->base.flags = flags;
1530}
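
/*
 * Illustrative sketch (not part of the original header): a callback matching
 * the template above. It completes a caller-owned struct completion so that
 * the submitting thread can wait for the asynchronous operation. The early
 * return on -EINPROGRESS covers a backlogged request that just started
 * processing. The names my_result and my_aead_done are made up for this
 * example.
 *
 *	struct my_result {
 *		struct completion completion;
 *		int err;
 *	};
 *
 *	static void my_aead_done(struct crypto_async_request *req, int error)
 *	{
 *		struct my_result *res = req->data;
 *
 *		if (error == -EINPROGRESS)
 *			return;
 *		res->err = error;
 *		complete(&res->completion);
 *	}
 *
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  my_aead_done, &res);
 */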
1531
1532/**
1533 * aead_request_set_crypt - set data buffers
1534 * @req: request handle
1535 * @src: source scatter / gather list
1536 * @dst: destination scatter / gather list
1537 * @cryptlen: number of bytes to process from @src
1538 * @iv: IV for the cipher operation which must comply with the IV size defined
1539 *      by crypto_aead_ivsize()
1540 *
1541 * This function sets the source and destination data scatter / gather lists.
1542 *
1543 * For encryption, the source is treated as the plaintext and the
1544 * destination is the ciphertext. For a decryption operation, the use is
1545 * reversed - the source is the ciphertext and the destination is the plaintext.
1546 *
1547 * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
1548 *		  the caller must concatenate the ciphertext followed by the
1549 *		  authentication tag and provide the entire data stream to the
1550 *		  decryption operation (i.e. the data length used for the
1551 *		  initialization of the scatterlist and the data length for the
1552 *		  decryption operation is identical). For encryption, however,
1553 *		  the authentication tag is created while encrypting the data.
1554 *		  The destination buffer must hold sufficient space for the
1555 *		  ciphertext and the authentication tag while the encryption
1556 *		  invocation must only point to the plaintext data size. The
1557 *		  following code snippet illustrates the memory usage:
1558 *		  buffer = kmalloc(ptbuflen + (enc ? authsize : 0), GFP_KERNEL);
1559 *		  sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
1560 *		  aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
1561 */
1562static inline void aead_request_set_crypt(struct aead_request *req,
1563					  struct scatterlist *src,
1564					  struct scatterlist *dst,
1565					  unsigned int cryptlen, u8 *iv)
1566{
1567	req->src = src;
1568	req->dst = dst;
1569	req->cryptlen = cryptlen;
1570	req->iv = iv;
1571}
1572
1573/**
1574 * aead_request_set_assoc() - set the associated data scatter / gather list
1575 * @req: request handle
1576 * @assoc: associated data scatter / gather list
1577 * @assoclen: number of bytes to process from @assoc
1578 *
1579 * For encryption, the memory is filled with the associated data. For
1580 * decryption, the memory must point to the associated data.
1581 */
1582static inline void aead_request_set_assoc(struct aead_request *req,
1583					  struct scatterlist *assoc,
1584					  unsigned int assoclen)
1585{
1586	req->assoc = assoc;
1587	req->assoclen = assoclen;
1588}
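
/*
 * Illustrative sketch (not part of the original header): a complete AEAD
 * encryption using the handles and helpers above, waiting synchronously for
 * an asynchronous implementation to finish. The algorithm name "gcm(aes)",
 * the key, iv, buffer, ptbuflen, assocdata and assoclen variables, the
 * out_free_tfm label, and the my_result / my_aead_done types from the
 * previous sketch are assumptions made for this example; error handling is
 * abbreviated. The buffer holds ptbuflen bytes of plaintext followed by room
 * for the authentication tag.
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *req;
 *	struct my_result res;
 *	struct scatterlist sg, asg;
 *	int err;
 *
 *	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	if (!req)
 *		goto out_free_tfm;
 *	init_completion(&res.completion);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  my_aead_done, &res);
 *
 *	sg_init_one(&sg, buffer, ptbuflen + crypto_aead_authsize(tfm));
 *	sg_init_one(&asg, assocdata, assoclen);
 *	aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
 *	aead_request_set_assoc(req, &asg, assoclen);
 *
 *	err = crypto_aead_encrypt(req);
 *	if (err == -EINPROGRESS || err == -EBUSY) {
 *		wait_for_completion(&res.completion);
 *		err = res.err;
 *	}
 *
 *	aead_request_free(req);
 *	crypto_free_aead(tfm);
 */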
1589
1590/**
1591 * DOC: Synchronous Block Cipher API
1592 *
1593 * The synchronous block cipher API is used with the ciphers of type
1594 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
1595 *
1596 * Synchronous calls have a context in the tfm. But since a single tfm can be
1597 * used by multiple calls in parallel, this information must not be changed
1598 * (unless a lock is used). This applies, for example, to the symmetric key.
1599 * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
1600 * structure for the synchronous blkcipher API. It is the only state information
1601 * that can be kept for synchronous calls without using a big lock across a tfm.
1602 *
1603 * The block cipher API allows the use of a complete cipher, i.e. a cipher
1604 * consisting of a template (a block chaining mode) and a single block cipher
1605 * primitive (e.g. AES).
1606 *
1607 * The plaintext data buffer and the ciphertext data buffer are pointed to
1608 * by using scatter/gather lists. The cipher operation is performed
1609 * on all segments of the provided scatter/gather lists.
1610 *
1611 * The kernel crypto API supports a cipher operation "in-place" which means that
1612 * the caller may provide the same scatter/gather list for the plaintext and
1613 * the ciphertext. After the completion of the cipher operation, the plaintext
1614 * data is replaced with the ciphertext data in case of an encryption and vice
1615 * versa for a decryption. The caller must ensure that the scatter/gather lists
1616 * for the output data point to sufficiently large buffers, i.e. multiples of
1617 * the block size of the cipher.
1618 */
1619
1620static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
1621	struct crypto_tfm *tfm)
1622{
1623	return (struct crypto_blkcipher *)tfm;
1624}
1625
1626static inline struct crypto_blkcipher *crypto_blkcipher_cast(
1627	struct crypto_tfm *tfm)
1628{
1629	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
1630	return __crypto_blkcipher_cast(tfm);
1631}
1632
1633/**
1634 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
1635 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1636 *	      blkcipher cipher
1637 * @type: specifies the type of the cipher
1638 * @mask: specifies the mask for the cipher
1639 *
1640 * Allocate a cipher handle for a block cipher. The returned struct
1641 * crypto_blkcipher is the cipher handle that is required for any subsequent
1642 * API invocation for that block cipher.
1643 *
1644 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1645 *	   of an error, PTR_ERR() returns the error code.
1646 */
1647static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
1648	const char *alg_name, u32 type, u32 mask)
1649{
1650	type &= ~CRYPTO_ALG_TYPE_MASK;
1651	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
1652	mask |= CRYPTO_ALG_TYPE_MASK;
1653
1654	return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
1655}
1656
1657static inline struct crypto_tfm *crypto_blkcipher_tfm(
1658	struct crypto_blkcipher *tfm)
1659{
1660	return &tfm->base;
1661}
1662
1663/**
1664 * crypto_free_blkcipher() - zeroize and free the block cipher handle
1665 * @tfm: cipher handle to be freed
1666 */
1667static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
1668{
1669	crypto_free_tfm(crypto_blkcipher_tfm(tfm));
1670}
1671
1672/**
1673 * crypto_has_blkcipher() - Search for the availability of a block cipher
1674 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1675 *	      block cipher
1676 * @type: specifies the type of the cipher
1677 * @mask: specifies the mask for the cipher
1678 *
1679 * Return: true when the block cipher is known to the kernel crypto API; false
1680 *	   otherwise
1681 */
1682static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
1683{
1684	type &= ~CRYPTO_ALG_TYPE_MASK;
1685	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
1686	mask |= CRYPTO_ALG_TYPE_MASK;
1687
1688	return crypto_has_alg(alg_name, type, mask);
1689}
1690
1691/**
1692 * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
1693 * @tfm: cipher handle
1694 *
1695 * Return: The character string holding the name of the cipher
1696 */
1697static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
1698{
1699	return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
1700}
1701
1702static inline struct blkcipher_tfm *crypto_blkcipher_crt(
1703	struct crypto_blkcipher *tfm)
1704{
1705	return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
1706}
1707
1708static inline struct blkcipher_alg *crypto_blkcipher_alg(
1709	struct crypto_blkcipher *tfm)
1710{
1711	return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
1712}
1713
1714/**
1715 * crypto_blkcipher_ivsize() - obtain IV size
1716 * @tfm: cipher handle
1717 *
1718 * The size of the IV for the block cipher referenced by the cipher handle is
1719 * returned. This IV size may be zero if the cipher does not need an IV.
1720 *
1721 * Return: IV size in bytes
1722 */
1723static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
1724{
1725	return crypto_blkcipher_alg(tfm)->ivsize;
1726}
1727
1728/**
1729 * crypto_blkcipher_blocksize() - obtain block size of cipher
1730 * @tfm: cipher handle
1731 *
1732 * The block size for the block cipher referenced with the cipher handle is
1733 * returned. The caller may use that information to allocate appropriate
1734 * memory for the data returned by the encryption or decryption operation.
1735 *
1736 * Return: block size of cipher
1737 */
1738static inline unsigned int crypto_blkcipher_blocksize(
1739	struct crypto_blkcipher *tfm)
1740{
1741	return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
1742}
1743
1744static inline unsigned int crypto_blkcipher_alignmask(
1745	struct crypto_blkcipher *tfm)
1746{
1747	return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
1748}
1749
1750static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
1751{
1752	return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
1753}
1754
1755static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
1756					      u32 flags)
1757{
1758	crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
1759}
1760
1761static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
1762						u32 flags)
1763{
1764	crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
1765}
1766
1767/**
1768 * crypto_blkcipher_setkey() - set key for cipher
1769 * @tfm: cipher handle
1770 * @key: buffer holding the key
1771 * @keylen: length of the key in bytes
1772 *
1773 * The caller provided key is set for the block cipher referenced by the cipher
1774 * handle.
1775 *
1776 * Note, the key length determines the cipher variant. Many block ciphers come
1777 * in different variants depending on the key size, such as AES-128 vs AES-192
1778 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
1779 * is performed.
1780 *
1781 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1782 */
1783static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
1784					  const u8 *key, unsigned int keylen)
1785{
1786	return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
1787						 key, keylen);
1788}
1789
1790/**
1791 * crypto_blkcipher_encrypt() - encrypt plaintext
1792 * @desc: reference to the block cipher handle with meta data
1793 * @dst: scatter/gather list that is filled by the cipher operation with the
1794 *	ciphertext
1795 * @src: scatter/gather list that holds the plaintext
1796 * @nbytes: number of bytes of the plaintext to encrypt.
1797 *
1798 * Encrypt plaintext data using the IV set by the caller with a preceding
1799 * call of crypto_blkcipher_set_iv.
1800 *
1801 * The blkcipher_desc data structure must be filled by the caller and can
1802 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1803 * with the block cipher handle; desc.flags is filled with either
1804 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1805 *
1806 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1807 */
1808static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
1809					   struct scatterlist *dst,
1810					   struct scatterlist *src,
1811					   unsigned int nbytes)
1812{
1813	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
1814	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
1815}
1816
1817/**
1818 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
1819 * @desc: reference to the block cipher handle with meta data
1820 * @dst: scatter/gather list that is filled by the cipher operation with the
1821 *	ciphertext
1822 * @src: scatter/gather list that holds the plaintext
1823 * @nbytes: number of bytes of the plaintext to encrypt.
1824 *
1825 * Encrypt plaintext data with the use of an IV that is solely used for this
1826 * cipher operation. Any previously set IV is not used.
1827 *
1828 * The blkcipher_desc data structure must be filled by the caller and can
1829 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1830 * with the block cipher handle; desc.info is filled with the IV to be used for
1831 * the current operation; desc.flags is filled with either
1832 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1833 *
1834 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1835 */
1836static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
1837					      struct scatterlist *dst,
1838					      struct scatterlist *src,
1839					      unsigned int nbytes)
1840{
1841	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
1842}
1843
1844/**
1845 * crypto_blkcipher_decrypt() - decrypt ciphertext
1846 * @desc: reference to the block cipher handle with meta data
1847 * @dst: scatter/gather list that is filled by the cipher operation with the
1848 *	plaintext
1849 * @src: scatter/gather list that holds the ciphertext
1850 * @nbytes: number of bytes of the ciphertext to decrypt.
1851 *
1852 * Decrypt ciphertext data using the IV set by the caller with a preceding
1853 * call of crypto_blkcipher_set_iv.
1854 *
1855 * The blkcipher_desc data structure must be filled by the caller as documented
1856 * for the crypto_blkcipher_encrypt call above.
1857 *
1858 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1859 *
1860 */
1861static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
1862					   struct scatterlist *dst,
1863					   struct scatterlist *src,
1864					   unsigned int nbytes)
1865{
1866	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
1867	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
1868}
1869
1870/**
1871 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
1872 * @desc: reference to the block cipher handle with meta data
1873 * @dst: scatter/gather list that is filled by the cipher operation with the
1874 *	plaintext
1875 * @src: scatter/gather list that holds the ciphertext
1876 * @nbytes: number of bytes of the ciphertext to decrypt.
1877 *
1878 * Decrypt ciphertext data with the use of an IV that is solely used for this
1879 * cipher operation. Any previously set IV is not used.
1880 *
1881 * The blkcipher_desc data structure must be filled by the caller as documented
1882 * for the crypto_blkcipher_encrypt_iv call above.
1883 *
1884 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1885 */
1886static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
1887					      struct scatterlist *dst,
1888					      struct scatterlist *src,
1889					      unsigned int nbytes)
1890{
1891	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
1892}
1893
1894/**
1895 * crypto_blkcipher_set_iv() - set IV for cipher
1896 * @tfm: cipher handle
1897 * @src: buffer holding the IV
1898 * @len: length of the IV in bytes
1899 *
1900 * The caller provided IV is set for the block cipher referenced by the cipher
1901 * handle.
1902 */
1903static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
1904					   const u8 *src, unsigned int len)
1905{
1906	memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
1907}
1908
1909/**
1910 * crypto_blkcipher_get_iv() - obtain IV from cipher
1911 * @tfm: cipher handle
1912 * @dst: buffer filled with the IV
1913 * @len: length of the buffer dst
1914 *
1915 * The caller can obtain the IV set for the block cipher referenced by the
1916 * cipher handle and store it into the user-provided buffer. If the buffer
1917 * has insufficient space, the IV is truncated to fit the buffer.
1918 */
1919static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
1920					   u8 *dst, unsigned int len)
1921{
1922	memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
1923}
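
/*
 * Illustrative sketch (not part of the original header): an in-place
 * synchronous block cipher encryption using the helpers above (the same
 * scatterlist is used for source and destination). The algorithm name
 * "cbc(aes)" and the key, iv, buffer and buflen variables are assumptions
 * made for this example; buflen must be a multiple of the cipher block size.
 *
 *	struct crypto_blkcipher *tfm;
 *	struct blkcipher_desc desc;
 *	struct scatterlist sg;
 *	int err;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	crypto_blkcipher_setkey(tfm, key, 16);
 *	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
 *
 *	desc.tfm = tfm;
 *	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *
 *	sg_init_one(&sg, buffer, buflen);
 *	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, buflen);
 *
 *	crypto_free_blkcipher(tfm);
 */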
1924
1925/**
1926 * DOC: Single Block Cipher API
1927 *
1928 * The single block cipher API is used with the ciphers of type
1929 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
1930 *
1931 * Using the single block cipher API calls, operations with the basic cipher
1932 * primitive can be implemented. These cipher primitives exclude any block
1933 * chaining operations including IV handling.
1934 *
1935 * The purpose of this single block cipher API is to support the implementation
1936 * of templates or other concepts that only need to perform the cipher operation
1937 * on one block at a time. Templates invoke the underlying cipher primitive
1938 * block-wise and process either the input or the output data of these cipher
1939 * operations.
1940 */
1941
1942static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
1943{
1944	return (struct crypto_cipher *)tfm;
1945}
1946
1947static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
1948{
1949	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
1950	return __crypto_cipher_cast(tfm);
1951}
1952
1953/**
1954 * crypto_alloc_cipher() - allocate single block cipher handle
1955 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1956 *	     single block cipher
1957 * @type: specifies the type of the cipher
1958 * @mask: specifies the mask for the cipher
1959 *
1960 * Allocate a cipher handle for a single block cipher. The returned struct
1961 * crypto_cipher is the cipher handle that is required for any subsequent API
1962 * invocation for that single block cipher.
1963 *
1964 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1965 *	   of an error, PTR_ERR() returns the error code.
1966 */
1967static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
1968							u32 type, u32 mask)
1969{
1970	type &= ~CRYPTO_ALG_TYPE_MASK;
1971	type |= CRYPTO_ALG_TYPE_CIPHER;
1972	mask |= CRYPTO_ALG_TYPE_MASK;
1973
1974	return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
1975}
1976
1977static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
1978{
1979	return &tfm->base;
1980}
1981
1982/**
1983 * crypto_free_cipher() - zeroize and free the single block cipher handle
1984 * @tfm: cipher handle to be freed
1985 */
1986static inline void crypto_free_cipher(struct crypto_cipher *tfm)
1987{
1988	crypto_free_tfm(crypto_cipher_tfm(tfm));
1989}
1990
1991/**
1992 * crypto_has_cipher() - Search for the availability of a single block cipher
1993 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1994 *	     single block cipher
1995 * @type: specifies the type of the cipher
1996 * @mask: specifies the mask for the cipher
1997 *
1998 * Return: true when the single block cipher is known to the kernel crypto API;
1999 *	   false otherwise
2000 */
2001static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
2002{
2003	type &= ~CRYPTO_ALG_TYPE_MASK;
2004	type |= CRYPTO_ALG_TYPE_CIPHER;
2005	mask |= CRYPTO_ALG_TYPE_MASK;
2006
2007	return crypto_has_alg(alg_name, type, mask);
2008}
2009
2010static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
2011{
2012	return &crypto_cipher_tfm(tfm)->crt_cipher;
2013}
2014
2015/**
2016 * crypto_cipher_blocksize() - obtain block size for cipher
2017 * @tfm: cipher handle
2018 *
2019 * The block size for the single block cipher referenced with the cipher handle
2020 * tfm is returned. The caller may use that information to allocate appropriate
2021 * memory for the data returned by the encryption or decryption operation.
2022 *
2023 * Return: block size of cipher
2024 */
2025static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
2026{
2027	return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
2028}
2029
2030static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
2031{
2032	return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
2033}
2034
2035static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
2036{
2037	return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
2038}
2039
2040static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
2041					   u32 flags)
2042{
2043	crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
2044}
2045
2046static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
2047					     u32 flags)
2048{
2049	crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
2050}
2051
2052/**
2053 * crypto_cipher_setkey() - set key for cipher
2054 * @tfm: cipher handle
2055 * @key: buffer holding the key
2056 * @keylen: length of the key in bytes
2057 *
2058 * The caller provided key is set for the single block cipher referenced by the
2059 * cipher handle.
2060 *
2061 * Note, the key length determines the cipher variant. Many block ciphers come
2062 * in different variants depending on the key size, such as AES-128 vs AES-192
2063 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
2064 * is performed.
2065 *
2066 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
2067 */
2068static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
2069                                       const u8 *key, unsigned int keylen)
2070{
2071	return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
2072						  key, keylen);
2073}
2074
2075/**
2076 * crypto_cipher_encrypt_one() - encrypt one block of plaintext
2077 * @tfm: cipher handle
2078 * @dst: points to the buffer that will be filled with the ciphertext
2079 * @src: buffer holding the plaintext to be encrypted
2080 *
2081 * Invoke the encryption operation of one block. The caller must ensure that
2082 * the plaintext and ciphertext buffers are at least one block in size.
2083 */
2084static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
2085					     u8 *dst, const u8 *src)
2086{
2087	crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
2088						dst, src);
2089}
2090
2091/**
2092 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
2093 * @tfm: cipher handle
2094 * @dst: points to the buffer that will be filled with the plaintext
2095 * @src: buffer holding the ciphertext to be decrypted
2096 *
2097 * Invoke the decryption operation of one block. The caller must ensure that
2098 * the plaintext and ciphertext buffers are at least one block in size.
2099 */
2100static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
2101					     u8 *dst, const u8 *src)
2102{
2103	crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
2104						dst, src);
2105}
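
/*
 * Illustrative sketch (not part of the original header): encrypting a single
 * block with the primitive cipher API above, without any chaining mode or IV
 * handling. The algorithm name "aes", the key and the one-block input buffer
 * "in" are assumptions made for this example; for AES one block is 16 bytes.
 *
 *	struct crypto_cipher *tfm;
 *	u8 out[16];
 *
 *	tfm = crypto_alloc_cipher("aes", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	crypto_cipher_setkey(tfm, key, 16);
 *	crypto_cipher_encrypt_one(tfm, out, in);
 *	crypto_free_cipher(tfm);
 */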
2106
2107/**
2108 * DOC: Synchronous Message Digest API
2109 *
2110 * The synchronous message digest API is used with the ciphers of type
2111 * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto)
2112 */
2113
2114static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
2115{
2116	return (struct crypto_hash *)tfm;
2117}
2118
2119static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
2120{
2121	BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) &
2122	       CRYPTO_ALG_TYPE_HASH_MASK);
2123	return __crypto_hash_cast(tfm);
2124}
2125
2126/**
2127 * crypto_alloc_hash() - allocate synchronous message digest handle
2128 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
2129 *	      message digest cipher
2130 * @type: specifies the type of the cipher
2131 * @mask: specifies the mask for the cipher
2132 *
2133 * Allocate a cipher handle for a message digest. The returned struct
2134 * crypto_hash is the cipher handle that is required for any subsequent
2135 * API invocation for that message digest.
2136 *
2137 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
2138 * of an error, PTR_ERR() returns the error code.
2139 */
2140static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
2141						    u32 type, u32 mask)
2142{
2143	type &= ~CRYPTO_ALG_TYPE_MASK;
2144	mask &= ~CRYPTO_ALG_TYPE_MASK;
2145	type |= CRYPTO_ALG_TYPE_HASH;
2146	mask |= CRYPTO_ALG_TYPE_HASH_MASK;
2147
2148	return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask));
2149}
2150
2151static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
2152{
2153	return &tfm->base;
2154}
2155
2156/**
2157 * crypto_free_hash() - zeroize and free message digest handle
2158 * @tfm: cipher handle to be freed
2159 */
2160static inline void crypto_free_hash(struct crypto_hash *tfm)
2161{
2162	crypto_free_tfm(crypto_hash_tfm(tfm));
2163}
2164
2165/**
2166 * crypto_has_hash() - Search for the availability of a message digest
2167 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
2168 *	      message digest cipher
2169 * @type: specifies the type of the cipher
2170 * @mask: specifies the mask for the cipher
2171 *
2172 * Return: true when the message digest cipher is known to the kernel crypto
2173 *	   API; false otherwise
2174 */
2175static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
2176{
2177	type &= ~CRYPTO_ALG_TYPE_MASK;
2178	mask &= ~CRYPTO_ALG_TYPE_MASK;
2179	type |= CRYPTO_ALG_TYPE_HASH;
2180	mask |= CRYPTO_ALG_TYPE_HASH_MASK;
2181
2182	return crypto_has_alg(alg_name, type, mask);
2183}
2184
2185static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
2186{
2187	return &crypto_hash_tfm(tfm)->crt_hash;
2188}
2189
2190/**
2191 * crypto_hash_blocksize() - obtain block size for message digest
2192 * @tfm: cipher handle
2193 *
2194 * The block size for the message digest cipher referenced with the cipher
2195 * handle is returned.
2196 *
2197 * Return: block size of cipher
2198 */
2199static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
2200{
2201	return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
2202}
2203
2204static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
2205{
2206	return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
2207}
2208
2209/**
2210 * crypto_hash_digestsize() - obtain message digest size
2211 * @tfm: cipher handle
2212 *
2213 * The size for the message digest created by the message digest cipher
2214 * referenced with the cipher handle is returned.
2215 *
2216 * Return: message digest size
2217 */
2218static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
2219{
2220	return crypto_hash_crt(tfm)->digestsize;
2221}
2222
2223static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm)
2224{
2225	return crypto_tfm_get_flags(crypto_hash_tfm(tfm));
2226}
2227
2228static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags)
2229{
2230	crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags);
2231}
2232
2233static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
2234{
2235	crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
2236}
2237
2238/**
2239 * crypto_hash_init() - (re)initialize message digest handle
2240 * @desc: cipher request handle that is to be filled by the caller --
2241 *	  desc.tfm is filled with the hash cipher handle;
2242 *	  desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0.
2243 *
2244 * The call (re-)initializes the message digest referenced by the hash cipher
2245 * request handle. Any potentially existing state created by previous
2246 * operations is discarded.
2247 *
2248 * Return: 0 if the message digest initialization was successful; < 0 if an
2249 *	   error occurred
2250 */
2251static inline int crypto_hash_init(struct hash_desc *desc)
2252{
2253	return crypto_hash_crt(desc->tfm)->init(desc);
2254}
2255
2256/**
2257 * crypto_hash_update() - add data to message digest for processing
2258 * @desc: cipher request handle
2259 * @sg: scatter / gather list pointing to the data to be added to the message
2260 *      digest
2261 * @nbytes: number of bytes to be processed from @sg
2262 *
2263 * Updates the message digest state of the cipher handle pointed to by the
2264 * hash cipher request handle with the input data pointed to by the
2265 * scatter/gather list.
2266 *
2267 * Return: 0 if the message digest update was successful; < 0 if an error
2268 *	   occurred
2269 */
2270static inline int crypto_hash_update(struct hash_desc *desc,
2271				     struct scatterlist *sg,
2272				     unsigned int nbytes)
2273{
2274	return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
2275}
2276
2277/**
2278 * crypto_hash_final() - calculate message digest
2279 * @desc: cipher request handle
2280 * @out: message digest output buffer -- The caller must ensure that the out
2281 *	 buffer has a sufficient size (e.g. by using the crypto_hash_digestsize
2282 *	 function).
2283 *
2284 * Finalize the message digest operation and create the message digest
2285 * based on all data added to the cipher handle. The message digest is placed
2286 * into the output buffer.
2287 *
2288 * Return: 0 if the message digest creation was successful; < 0 if an error
2289 *	   occurred
2290 */
2291static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
2292{
2293	return crypto_hash_crt(desc->tfm)->final(desc, out);
2294}
2295
2296/**
2297 * crypto_hash_digest() - calculate message digest for a buffer
2298 * @desc: see crypto_hash_final()
2299 * @sg: see crypto_hash_update()
2300 * @nbytes:  see crypto_hash_update()
2301 * @out: see crypto_hash_final()
2302 *
2303 * This function is a "short-hand" for the function calls of crypto_hash_init,
2304 * crypto_hash_update and crypto_hash_final. The parameters have the same
2305 * meaning as discussed for those separate three functions.
2306 *
2307 * Return: 0 if the message digest creation was successful; < 0 if an error
2308 *	   occurred
2309 */
2310static inline int crypto_hash_digest(struct hash_desc *desc,
2311				     struct scatterlist *sg,
2312				     unsigned int nbytes, u8 *out)
2313{
2314	return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
2315}
2316
2317/**
2318 * crypto_hash_setkey() - set key for message digest
2319 * @hash: cipher handle
2320 * @key: buffer holding the key
2321 * @keylen: length of the key in bytes
2322 *
2323 * The caller provided key is set for the message digest cipher. The cipher
2324 * handle must point to a keyed hash in order for this function to succeed.
2325 *
2326 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
2327 */
2328static inline int crypto_hash_setkey(struct crypto_hash *hash,
2329				     const u8 *key, unsigned int keylen)
2330{
2331	return crypto_hash_crt(hash)->setkey(hash, key, keylen);
2332}
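
/*
 * Illustrative sketch (not part of the original header): computing a message
 * digest over a linear buffer with the synchronous hash API above, using
 * crypto_hash_digest() as the short-hand for init/update/final. The algorithm
 * name "sha1" and the data / datalen variables are assumptions made for this
 * example; the output buffer is sized for SHA-1 (20 bytes, see
 * crypto_hash_digestsize()).
 *
 *	struct crypto_hash *tfm;
 *	struct hash_desc desc;
 *	struct scatterlist sg;
 *	u8 md[20];
 *	int err;
 *
 *	tfm = crypto_alloc_hash("sha1", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	desc.tfm = tfm;
 *	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *	sg_init_one(&sg, data, datalen);
 *
 *	err = crypto_hash_digest(&desc, &sg, datalen, md);
 *
 *	crypto_free_hash(tfm);
 */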
2333
2334static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
2335{
2336	return (struct crypto_comp *)tfm;
2337}
2338
2339static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
2340{
2341	BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
2342	       CRYPTO_ALG_TYPE_MASK);
2343	return __crypto_comp_cast(tfm);
2344}
2345
2346static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
2347						    u32 type, u32 mask)
2348{
2349	type &= ~CRYPTO_ALG_TYPE_MASK;
2350	type |= CRYPTO_ALG_TYPE_COMPRESS;
2351	mask |= CRYPTO_ALG_TYPE_MASK;
2352
2353	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
2354}
2355
2356static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
2357{
2358	return &tfm->base;
2359}
2360
2361static inline void crypto_free_comp(struct crypto_comp *tfm)
2362{
2363	crypto_free_tfm(crypto_comp_tfm(tfm));
2364}
2365
2366static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
2367{
2368	type &= ~CRYPTO_ALG_TYPE_MASK;
2369	type |= CRYPTO_ALG_TYPE_COMPRESS;
2370	mask |= CRYPTO_ALG_TYPE_MASK;
2371
2372	return crypto_has_alg(alg_name, type, mask);
2373}
2374
2375static inline const char *crypto_comp_name(struct crypto_comp *tfm)
2376{
2377	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
2378}
2379
2380static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
2381{
2382	return &crypto_comp_tfm(tfm)->crt_compress;
2383}
2384
2385static inline int crypto_comp_compress(struct crypto_comp *tfm,
2386                                       const u8 *src, unsigned int slen,
2387                                       u8 *dst, unsigned int *dlen)
2388{
2389	return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
2390						  src, slen, dst, dlen);
2391}
2392
2393static inline int crypto_comp_decompress(struct crypto_comp *tfm,
2394                                         const u8 *src, unsigned int slen,
2395                                         u8 *dst, unsigned int *dlen)
2396{
2397	return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
2398						    src, slen, dst, dlen);
2399}
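
/*
 * Illustrative sketch (not part of the original header): compressing and
 * decompressing a buffer with the compression helpers above. The algorithm
 * name "deflate" and the src, srclen, cbuf and dbuf variables are assumptions
 * made for this example; clen and dlen are passed in as the available output
 * space and updated by the calls to the number of bytes actually produced.
 *
 *	struct crypto_comp *tfm;
 *	unsigned int clen = sizeof(cbuf);
 *	unsigned int dlen = sizeof(dbuf);
 *	int err;
 *
 *	tfm = crypto_alloc_comp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_comp_compress(tfm, src, srclen, cbuf, &clen);
 *	if (!err)
 *		err = crypto_comp_decompress(tfm, cbuf, clen, dbuf, &dlen);
 *
 *	crypto_free_comp(tfm);
 */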
2400
2401#endif	/* _LINUX_CRYPTO_H */