crypto/algif_aead.c

DEFINITIONS

This source file includes the following definitions:
  1. aead_sufficient_data
  2. aead_sendmsg
  3. crypto_aead_copy_sgl
  4. _aead_recvmsg
  5. aead_recvmsg
  6. aead_check_key
  7. aead_sendmsg_nokey
  8. aead_sendpage_nokey
  9. aead_recvmsg_nokey
  10. aead_bind
  11. aead_release
  12. aead_setauthsize
  13. aead_setkey
  14. aead_sock_destruct
  15. aead_accept_parent_nokey
  16. aead_accept_parent
  17. algif_aead_init
  18. algif_aead_exit

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * algif_aead: User-space interface for AEAD algorithms
   4  *
   5  * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
   6  *
   7  * This file provides the user-space API for AEAD ciphers.
   8  *
   9  * The following memory management concept is used:
  10  *
  11  * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
  12  * filled by user space with the data submitted via sendpage/sendmsg. Filling
  13  * up the TX SGL does not cause a crypto operation -- the data will only be
  14  * tracked by the kernel. Upon receipt of a recvmsg call, the caller must
  15  * provide a buffer, which is tracked with the RX SGL.
  16  *
  17  * During the processing of the recvmsg operation, the cipher request is
  18  * allocated and prepared. As part of the recvmsg operation, the processed
  19  * TX buffers are extracted from the TX SGL into a separate SGL.
  20  *
  21  * After the completion of the crypto operation, the RX SGL and the cipher
  22  * request are released. The extracted TX SGL parts are released together
  23  * with the RX SGL.
  24  */
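     /*
      * Illustrative sketch: a minimal user-space client for the "aead"
      * interface described above, shown to make the TX SGL / RX SGL flow
      * concrete. The algorithm "gcm(aes)", the 16-byte key and tag, the
      * 12-byte IV and the 16 + 32 byte AAD||PT layout are example choices,
      * and error handling is omitted; treat this as a hedged sketch, not a
      * reference client.
      *
      *    #include <linux/if_alg.h>
      *    #include <sys/socket.h>
      *    #include <string.h>
      *    #include <unistd.h>
      *
      *    unsigned char key[16] = { 0 }, iv[12] = { 0 };
      *    unsigned char buf[4096];          // holds AAD || PT on input
      *    struct sockaddr_alg sa = {
      *            .salg_family = AF_ALG,
      *            .salg_type   = "aead",
      *            .salg_name   = "gcm(aes)",
      *    };
      *    char cbuf[CMSG_SPACE(4) + CMSG_SPACE(sizeof(struct af_alg_iv) + 12) +
      *              CMSG_SPACE(4)] = { 0 };
      *    struct iovec iov = { .iov_base = buf, .iov_len = 16 + 32 };
      *    struct msghdr msg = {
      *            .msg_control    = cbuf,
      *            .msg_controllen = sizeof(cbuf),
      *            .msg_iov        = &iov,
      *            .msg_iovlen     = 1,
      *    };
      *    struct af_alg_iv *ivmsg;
      *    struct cmsghdr *cmsg;
      *    int tfmfd, opfd;
      *
      *    tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
      *    bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
      *    setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
      *    setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
      *    opfd = accept(tfmfd, NULL, 0);
      *
      *    cmsg = CMSG_FIRSTHDR(&msg);               // operation type
      *    cmsg->cmsg_level = SOL_ALG;
      *    cmsg->cmsg_type  = ALG_SET_OP;
      *    cmsg->cmsg_len   = CMSG_LEN(4);
      *    memcpy(CMSG_DATA(cmsg), &(__u32){ ALG_OP_ENCRYPT }, 4);
      *
      *    cmsg = CMSG_NXTHDR(&msg, cmsg);           // IV
      *    cmsg->cmsg_level = SOL_ALG;
      *    cmsg->cmsg_type  = ALG_SET_IV;
      *    cmsg->cmsg_len   = CMSG_LEN(sizeof(*ivmsg) + sizeof(iv));
      *    ivmsg = (struct af_alg_iv *)CMSG_DATA(cmsg);
      *    ivmsg->ivlen = sizeof(iv);
      *    memcpy(ivmsg->iv, iv, sizeof(iv));
      *
      *    cmsg = CMSG_NXTHDR(&msg, cmsg);           // first 16 bytes of buf are AAD
      *    cmsg->cmsg_level = SOL_ALG;
      *    cmsg->cmsg_type  = ALG_SET_AEAD_ASSOCLEN;
      *    cmsg->cmsg_len   = CMSG_LEN(4);
      *    memcpy(CMSG_DATA(cmsg), &(__u32){ 16 }, 4);
      *
      *    sendmsg(opfd, &msg, 0);                   // queues AAD || PT into the TX SGL
      *    read(opfd, buf, 16 + 32 + 16);            // returns AAD || CT || tag via the RX SGL
      */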
  25 
  26 #include <crypto/internal/aead.h>
  27 #include <crypto/scatterwalk.h>
  28 #include <crypto/if_alg.h>
  29 #include <crypto/skcipher.h>
  30 #include <crypto/null.h>
  31 #include <linux/init.h>
  32 #include <linux/list.h>
  33 #include <linux/kernel.h>
  34 #include <linux/mm.h>
  35 #include <linux/module.h>
  36 #include <linux/net.h>
  37 #include <net/sock.h>
  38 
  39 struct aead_tfm {
  40         struct crypto_aead *aead;
  41         struct crypto_sync_skcipher *null_tfm;
  42 };
  43 
  44 static inline bool aead_sufficient_data(struct sock *sk)
  45 {
  46         struct alg_sock *ask = alg_sk(sk);
  47         struct sock *psk = ask->parent;
  48         struct alg_sock *pask = alg_sk(psk);
  49         struct af_alg_ctx *ctx = ask->private;
  50         struct aead_tfm *aeadc = pask->private;
  51         struct crypto_aead *tfm = aeadc->aead;
  52         unsigned int as = crypto_aead_authsize(tfm);
  53 
  54         /*
  55          * The minimum amount of data needed for an AEAD cipher is the
  56          * AAD and, in the case of decryption, the tag.
  57          */
  58         return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
  59 }
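     /*
      * For example (illustrative numbers only): with a 16-byte tag and
      * ctx->aead_assoclen == 8, an encryption request is sufficient once
      * 8 bytes (the AAD) have been queued, whereas a decryption request
      * needs at least 8 + 16 = 24 bytes, since the tag must arrive
      * together with the ciphertext.
      */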
  60 
  61 static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
  62 {
  63         struct sock *sk = sock->sk;
  64         struct alg_sock *ask = alg_sk(sk);
  65         struct sock *psk = ask->parent;
  66         struct alg_sock *pask = alg_sk(psk);
  67         struct aead_tfm *aeadc = pask->private;
  68         struct crypto_aead *tfm = aeadc->aead;
  69         unsigned int ivsize = crypto_aead_ivsize(tfm);
  70 
  71         return af_alg_sendmsg(sock, msg, size, ivsize);
  72 }
  73 
  74 static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
  75                                 struct scatterlist *src,
  76                                 struct scatterlist *dst, unsigned int len)
  77 {
  78         SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
  79 
  80         skcipher_request_set_sync_tfm(skreq, null_tfm);
  81         skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
  82                                       NULL, NULL);
  83         skcipher_request_set_crypt(skreq, src, dst, len, NULL);
  84 
  85         return crypto_skcipher_encrypt(skreq);
  86 }
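     /*
      * Note: ecb(cipher_null) performs no transformation, so the "encrypt"
      * call above is effectively a scatterlist-to-scatterlist memcpy. It is
      * used to move AAD and plaintext/ciphertext from the TX SGL into the
      * RX SGL without walking the pages by hand.
      */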
  87 
  88 static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
  89                          size_t ignored, int flags)
  90 {
  91         struct sock *sk = sock->sk;
  92         struct alg_sock *ask = alg_sk(sk);
  93         struct sock *psk = ask->parent;
  94         struct alg_sock *pask = alg_sk(psk);
  95         struct af_alg_ctx *ctx = ask->private;
  96         struct aead_tfm *aeadc = pask->private;
  97         struct crypto_aead *tfm = aeadc->aead;
  98         struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
  99         unsigned int i, as = crypto_aead_authsize(tfm);
 100         struct af_alg_async_req *areq;
 101         struct af_alg_tsgl *tsgl, *tmp;
 102         struct scatterlist *rsgl_src, *tsgl_src = NULL;
 103         int err = 0;
 104         size_t used = 0;                /* [in]  TX bufs to be en/decrypted */
 105         size_t outlen = 0;              /* [out] RX bufs produced by kernel */
 106         size_t usedpages = 0;           /* [in]  RX bufs to be used from user */
 107         size_t processed = 0;           /* [in]  TX bufs to be consumed */
 108 
 109         if (!ctx->used) {
 110                 err = af_alg_wait_for_data(sk, flags);
 111                 if (err)
 112                         return err;
 113         }
 114 
 115         /*
 116          * Data length provided by caller via sendmsg/sendpage that has not
 117          * yet been processed.
 118          */
 119         used = ctx->used;
 120 
  121         /*
  122          * Make sure sufficient data is present -- note, the same check is
  123          * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
  124          * inform the data sender that something is wrong, but they are not
  125          * what maintains kernel integrity. This check is needed here too in
  126          * case user space decides not to honor the error returned by
  127          * sendmsg/sendpage and still calls recvmsg. It is this check that
  128          * protects the kernel integrity.
  129          */
 130         if (!aead_sufficient_data(sk))
 131                 return -EINVAL;
 132 
  133         /*
  134          * Calculate the minimum output buffer size holding the result of the
  135          * cipher operation. When encrypting data, the receiving buffer is
  136          * larger than the input buffer by the tag length, as the encryption
  137          * operation generates the tag. For decryption, the input buffer
  138          * provides the tag, which is consumed, so only the plaintext is
  139          * returned to the caller without any room needed for the tag.
  140          */
 141         if (ctx->enc)
 142                 outlen = used + as;
 143         else
 144                 outlen = used - as;
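             /*
              * For example (illustrative numbers): with as == 16 and 16 bytes
              * of AAD, encrypting used == 48 bytes (AAD || PT) yields
              * outlen == 64 (AAD || CT || tag on the RX side), while
              * decrypting used == 64 bytes (AAD || CT || tag) yields
              * outlen == 48 (AAD || PT).
              */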
 145 
 146         /*
 147          * The cipher operation input data is reduced by the associated data
 148          * length as this data is processed separately later on.
 149          */
 150         used -= ctx->aead_assoclen;
 151 
 152         /* Allocate cipher request for current operation. */
 153         areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
 154                                      crypto_aead_reqsize(tfm));
 155         if (IS_ERR(areq))
 156                 return PTR_ERR(areq);
 157 
 158         /* convert iovecs of output buffers into RX SGL */
 159         err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
 160         if (err)
 161                 goto free;
 162 
  163         /*
  164          * Ensure the output buffer is sufficiently large. If the caller
  165          * provides less buffer space, only process as much input as that
  166          * output space can hold. This allows AIO operation where the caller
  167          * sent all data to be processed up front and each AIO request then
  168          * performs the operation on a different chunk of the input data.
  169          */
 170         if (usedpages < outlen) {
 171                 size_t less = outlen - usedpages;
 172 
 173                 if (used < less) {
 174                         err = -EINVAL;
 175                         goto free;
 176                 }
 177                 used -= less;
 178                 outlen -= less;
 179         }
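             /*
              * For example (illustrative numbers): if outlen is 64 but the
              * caller supplied only 48 bytes of RX buffer space, "less" is 16;
              * the operation is then shrunk to consume 16 fewer input bytes
              * and to produce 48 output bytes, and the unconsumed tail stays
              * queued in the TX SGL for a subsequent request.
              */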
 180 
 181         processed = used + ctx->aead_assoclen;
 182         list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
 183                 for (i = 0; i < tsgl->cur; i++) {
 184                         struct scatterlist *process_sg = tsgl->sg + i;
 185 
 186                         if (!(process_sg->length) || !sg_page(process_sg))
 187                                 continue;
 188                         tsgl_src = process_sg;
 189                         break;
 190                 }
 191                 if (tsgl_src)
 192                         break;
 193         }
 194         if (processed && !tsgl_src) {
 195                 err = -EFAULT;
 196                 goto free;
 197         }
 198 
  199         /*
  200          * Copy the AAD from the source to the destination.
  201          *
  202          * The AAD is copied to the destination buffer without change. Even
  203          * when user space performs an in-place cipher operation, the kernel
  204          * still copies the data, as it cannot tell whether such an in-place
  205          * operation was requested.
  206          *
  207          * For efficiency, the following implementation ensures that the
  208          * ciphers are invoked to perform the crypto operation in place. This
  209          * is achieved by the memory management described below.
  210          */
 211 
 212         /* Use the RX SGL as source (and destination) for crypto op. */
 213         rsgl_src = areq->first_rsgl.sgl.sg;
 214 
 215         if (ctx->enc) {
 216                 /*
 217                  * Encryption operation - The in-place cipher operation is
 218                  * achieved by the following operation:
 219                  *
 220                  * TX SGL: AAD || PT
 221                  *          |      |
 222                  *          | copy |
 223                  *          v      v
 224                  * RX SGL: AAD || PT || Tag
 225                  */
 226                 err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
 227                                            areq->first_rsgl.sgl.sg, processed);
 228                 if (err)
 229                         goto free;
 230                 af_alg_pull_tsgl(sk, processed, NULL, 0);
 231         } else {
 232                 /*
 233                  * Decryption operation - To achieve an in-place cipher
  234                  * operation, the following SGL structure is used:
 235                  *
 236                  * TX SGL: AAD || CT || Tag
 237                  *          |      |     ^
 238                  *          | copy |     | Create SGL link.
 239                  *          v      v     |
 240                  * RX SGL: AAD || CT ----+
 241                  */
 242 
 243                  /* Copy AAD || CT to RX SGL buffer for in-place operation. */
 244                 err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
 245                                            areq->first_rsgl.sgl.sg, outlen);
 246                 if (err)
 247                         goto free;
 248 
 249                 /* Create TX SGL for tag and chain it to RX SGL. */
 250                 areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
 251                                                        processed - as);
 252                 if (!areq->tsgl_entries)
 253                         areq->tsgl_entries = 1;
 254                 areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
 255                                                          areq->tsgl_entries),
 256                                           GFP_KERNEL);
 257                 if (!areq->tsgl) {
 258                         err = -ENOMEM;
 259                         goto free;
 260                 }
 261                 sg_init_table(areq->tsgl, areq->tsgl_entries);
 262 
 263                 /* Release TX SGL, except for tag data and reassign tag data. */
 264                 af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
 265 
 266                 /* chain the areq TX SGL holding the tag with RX SGL */
 267                 if (usedpages) {
 268                         /* RX SGL present */
 269                         struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
 270 
 271                         sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
 272                         sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
 273                                  areq->tsgl);
 274                 } else
 275                         /* no RX SGL present (e.g. authentication only) */
 276                         rsgl_src = areq->tsgl;
 277         }
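             /*
              * At this point the RX SGL holds AAD || text in place: for
              * encryption the cipher will write the tag directly behind it,
              * while for decryption the tag is supplied through the retained
              * TX SGL entries chained onto (or substituted for) the RX SGL
              * above.
              */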
 278 
 279         /* Initialize the crypto operation */
 280         aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
 281                                areq->first_rsgl.sgl.sg, used, ctx->iv);
 282         aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
 283         aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
 284 
 285         if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
 286                 /* AIO operation */
 287                 sock_hold(sk);
 288                 areq->iocb = msg->msg_iocb;
 289 
 290                 /* Remember output size that will be generated. */
 291                 areq->outlen = outlen;
 292 
 293                 aead_request_set_callback(&areq->cra_u.aead_req,
 294                                           CRYPTO_TFM_REQ_MAY_BACKLOG,
 295                                           af_alg_async_cb, areq);
 296                 err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
 297                                  crypto_aead_decrypt(&areq->cra_u.aead_req);
 298 
 299                 /* AIO operation in progress */
 300                 if (err == -EINPROGRESS || err == -EBUSY)
 301                         return -EIOCBQUEUED;
 302 
 303                 sock_put(sk);
 304         } else {
 305                 /* Synchronous operation */
 306                 aead_request_set_callback(&areq->cra_u.aead_req,
 307                                           CRYPTO_TFM_REQ_MAY_BACKLOG,
 308                                           crypto_req_done, &ctx->wait);
 309                 err = crypto_wait_req(ctx->enc ?
 310                                 crypto_aead_encrypt(&areq->cra_u.aead_req) :
 311                                 crypto_aead_decrypt(&areq->cra_u.aead_req),
 312                                 &ctx->wait);
 313         }
  314 
 316 free:
 317         af_alg_free_resources(areq);
 318 
 319         return err ? err : outlen;
 320 }
 321 
 322 static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
 323                         size_t ignored, int flags)
 324 {
 325         struct sock *sk = sock->sk;
 326         int ret = 0;
 327 
 328         lock_sock(sk);
 329         while (msg_data_left(msg)) {
 330                 int err = _aead_recvmsg(sock, msg, ignored, flags);
 331 
  332                 /*
  333                  * This error covers -EIOCBQUEUED which implies that we can
  334                  * only handle one AIO request. If the caller wants to have
  335                  * multiple AIO requests in parallel, multiple separate AIO
  336                  * calls must be made.
  337                  *
  338                  * Also return the error if no data has been processed so far.
  339                  */
 340                 if (err <= 0) {
 341                         if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
 342                                 ret = err;
 343                         goto out;
 344                 }
 345 
 346                 ret += err;
 347         }
 348 
 349 out:
 350         af_alg_wmem_wakeup(sk);
 351         release_sock(sk);
 352         return ret;
 353 }
 354 
 355 static struct proto_ops algif_aead_ops = {
 356         .family         =       PF_ALG,
 357 
 358         .connect        =       sock_no_connect,
 359         .socketpair     =       sock_no_socketpair,
 360         .getname        =       sock_no_getname,
 361         .ioctl          =       sock_no_ioctl,
 362         .listen         =       sock_no_listen,
 363         .shutdown       =       sock_no_shutdown,
 364         .getsockopt     =       sock_no_getsockopt,
 365         .mmap           =       sock_no_mmap,
 366         .bind           =       sock_no_bind,
 367         .accept         =       sock_no_accept,
 368         .setsockopt     =       sock_no_setsockopt,
 369 
 370         .release        =       af_alg_release,
 371         .sendmsg        =       aead_sendmsg,
 372         .sendpage       =       af_alg_sendpage,
 373         .recvmsg        =       aead_recvmsg,
 374         .poll           =       af_alg_poll,
 375 };
 376 
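     /*
      * Roughly: child sockets accepted while the parent tfm socket still
      * lacks a key start out with the _nokey ops below. Each _nokey entry
      * point calls aead_check_key(), which fails with -ENOKEY as long as
      * the tfm has CRYPTO_TFM_NEED_KEY set and otherwise accounts a
      * reference on the parent so that subsequent calls skip the check.
      */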
 377 static int aead_check_key(struct socket *sock)
 378 {
 379         int err = 0;
 380         struct sock *psk;
 381         struct alg_sock *pask;
 382         struct aead_tfm *tfm;
 383         struct sock *sk = sock->sk;
 384         struct alg_sock *ask = alg_sk(sk);
 385 
 386         lock_sock(sk);
 387         if (ask->refcnt)
 388                 goto unlock_child;
 389 
 390         psk = ask->parent;
 391         pask = alg_sk(ask->parent);
 392         tfm = pask->private;
 393 
 394         err = -ENOKEY;
 395         lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
 396         if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
 397                 goto unlock;
 398 
 399         if (!pask->refcnt++)
 400                 sock_hold(psk);
 401 
 402         ask->refcnt = 1;
 403         sock_put(psk);
 404 
 405         err = 0;
 406 
 407 unlock:
 408         release_sock(psk);
 409 unlock_child:
 410         release_sock(sk);
 411 
 412         return err;
 413 }
 414 
 415 static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
 416                                   size_t size)
 417 {
 418         int err;
 419 
 420         err = aead_check_key(sock);
 421         if (err)
 422                 return err;
 423 
 424         return aead_sendmsg(sock, msg, size);
 425 }
 426 
 427 static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
 428                                        int offset, size_t size, int flags)
 429 {
 430         int err;
 431 
 432         err = aead_check_key(sock);
 433         if (err)
 434                 return err;
 435 
 436         return af_alg_sendpage(sock, page, offset, size, flags);
 437 }
 438 
 439 static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
 440                                   size_t ignored, int flags)
 441 {
 442         int err;
 443 
 444         err = aead_check_key(sock);
 445         if (err)
 446                 return err;
 447 
 448         return aead_recvmsg(sock, msg, ignored, flags);
 449 }
 450 
 451 static struct proto_ops algif_aead_ops_nokey = {
 452         .family         =       PF_ALG,
 453 
 454         .connect        =       sock_no_connect,
 455         .socketpair     =       sock_no_socketpair,
 456         .getname        =       sock_no_getname,
 457         .ioctl          =       sock_no_ioctl,
 458         .listen         =       sock_no_listen,
 459         .shutdown       =       sock_no_shutdown,
 460         .getsockopt     =       sock_no_getsockopt,
 461         .mmap           =       sock_no_mmap,
 462         .bind           =       sock_no_bind,
 463         .accept         =       sock_no_accept,
 464         .setsockopt     =       sock_no_setsockopt,
 465 
 466         .release        =       af_alg_release,
 467         .sendmsg        =       aead_sendmsg_nokey,
 468         .sendpage       =       aead_sendpage_nokey,
 469         .recvmsg        =       aead_recvmsg_nokey,
 470         .poll           =       af_alg_poll,
 471 };
 472 
 473 static void *aead_bind(const char *name, u32 type, u32 mask)
 474 {
 475         struct aead_tfm *tfm;
 476         struct crypto_aead *aead;
 477         struct crypto_sync_skcipher *null_tfm;
 478 
 479         tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
 480         if (!tfm)
 481                 return ERR_PTR(-ENOMEM);
 482 
 483         aead = crypto_alloc_aead(name, type, mask);
 484         if (IS_ERR(aead)) {
 485                 kfree(tfm);
 486                 return ERR_CAST(aead);
 487         }
 488 
 489         null_tfm = crypto_get_default_null_skcipher();
 490         if (IS_ERR(null_tfm)) {
 491                 crypto_free_aead(aead);
 492                 kfree(tfm);
 493                 return ERR_CAST(null_tfm);
 494         }
 495 
 496         tfm->aead = aead;
 497         tfm->null_tfm = null_tfm;
 498 
 499         return tfm;
 500 }
 501 
 502 static void aead_release(void *private)
 503 {
 504         struct aead_tfm *tfm = private;
 505 
 506         crypto_free_aead(tfm->aead);
 507         crypto_put_default_null_skcipher();
 508         kfree(tfm);
 509 }
 510 
 511 static int aead_setauthsize(void *private, unsigned int authsize)
 512 {
 513         struct aead_tfm *tfm = private;
 514 
 515         return crypto_aead_setauthsize(tfm->aead, authsize);
 516 }
 517 
 518 static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
 519 {
 520         struct aead_tfm *tfm = private;
 521 
 522         return crypto_aead_setkey(tfm->aead, key, keylen);
 523 }
 524 
 525 static void aead_sock_destruct(struct sock *sk)
 526 {
 527         struct alg_sock *ask = alg_sk(sk);
 528         struct af_alg_ctx *ctx = ask->private;
 529         struct sock *psk = ask->parent;
 530         struct alg_sock *pask = alg_sk(psk);
 531         struct aead_tfm *aeadc = pask->private;
 532         struct crypto_aead *tfm = aeadc->aead;
 533         unsigned int ivlen = crypto_aead_ivsize(tfm);
 534 
 535         af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
 536         sock_kzfree_s(sk, ctx->iv, ivlen);
 537         sock_kfree_s(sk, ctx, ctx->len);
 538         af_alg_release_parent(sk);
 539 }
 540 
 541 static int aead_accept_parent_nokey(void *private, struct sock *sk)
 542 {
 543         struct af_alg_ctx *ctx;
 544         struct alg_sock *ask = alg_sk(sk);
 545         struct aead_tfm *tfm = private;
 546         struct crypto_aead *aead = tfm->aead;
 547         unsigned int len = sizeof(*ctx);
 548         unsigned int ivlen = crypto_aead_ivsize(aead);
 549 
 550         ctx = sock_kmalloc(sk, len, GFP_KERNEL);
 551         if (!ctx)
 552                 return -ENOMEM;
 553         memset(ctx, 0, len);
 554 
 555         ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
 556         if (!ctx->iv) {
 557                 sock_kfree_s(sk, ctx, len);
 558                 return -ENOMEM;
 559         }
 560         memset(ctx->iv, 0, ivlen);
 561 
 562         INIT_LIST_HEAD(&ctx->tsgl_list);
 563         ctx->len = len;
 564         ctx->used = 0;
 565         atomic_set(&ctx->rcvused, 0);
 566         ctx->more = 0;
 567         ctx->merge = 0;
 568         ctx->enc = 0;
 569         ctx->aead_assoclen = 0;
 570         crypto_init_wait(&ctx->wait);
 571 
 572         ask->private = ctx;
 573 
 574         sk->sk_destruct = aead_sock_destruct;
 575 
 576         return 0;
 577 }
 578 
 579 static int aead_accept_parent(void *private, struct sock *sk)
 580 {
 581         struct aead_tfm *tfm = private;
 582 
 583         if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
 584                 return -ENOKEY;
 585 
 586         return aead_accept_parent_nokey(private, sk);
 587 }
 588 
 589 static const struct af_alg_type algif_type_aead = {
 590         .bind           =       aead_bind,
 591         .release        =       aead_release,
 592         .setkey         =       aead_setkey,
 593         .setauthsize    =       aead_setauthsize,
 594         .accept         =       aead_accept_parent,
 595         .accept_nokey   =       aead_accept_parent_nokey,
 596         .ops            =       &algif_aead_ops,
 597         .ops_nokey      =       &algif_aead_ops_nokey,
 598         .name           =       "aead",
 599         .owner          =       THIS_MODULE
 600 };
 601 
 602 static int __init algif_aead_init(void)
 603 {
 604         return af_alg_register_type(&algif_type_aead);
 605 }
 606 
 607 static void __exit algif_aead_exit(void)
 608 {
 609         int err = af_alg_unregister_type(&algif_type_aead);
 610         BUG_ON(err);
 611 }
 612 
 613 module_init(algif_aead_init);
 614 module_exit(algif_aead_exit);
 615 MODULE_LICENSE("GPL");
 616 MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
 617 MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");
