root/drivers/crypto/mediatek/mtk-sha.c


DEFINITIONS

This source file includes the following definitions.
  1. mtk_sha_read
  2. mtk_sha_write
  3. mtk_sha_ring_shift
  4. mtk_sha_find_dev
  5. mtk_sha_append_sg
  6. mtk_sha_fill_padding
  7. mtk_sha_info_init
  8. mtk_sha_info_update
  9. mtk_sha_finish_hmac
  10. mtk_sha_init
  11. mtk_sha_xmit
  12. mtk_sha_dma_map
  13. mtk_sha_update_slow
  14. mtk_sha_update_start
  15. mtk_sha_final_req
  16. mtk_sha_finish
  17. mtk_sha_finish_req
  18. mtk_sha_handle_queue
  19. mtk_sha_enqueue
  20. mtk_sha_unmap
  21. mtk_sha_complete
  22. mtk_sha_update
  23. mtk_sha_final
  24. mtk_sha_finup
  25. mtk_sha_digest
  26. mtk_sha_setkey
  27. mtk_sha_export
  28. mtk_sha_import
  29. mtk_sha_cra_init_alg
  30. mtk_sha_cra_init
  31. mtk_sha_cra_sha1_init
  32. mtk_sha_cra_sha224_init
  33. mtk_sha_cra_sha256_init
  34. mtk_sha_cra_sha384_init
  35. mtk_sha_cra_sha512_init
  36. mtk_sha_cra_exit
  37. mtk_sha_queue_task
  38. mtk_sha_done_task
  39. mtk_sha_irq
  40. mtk_sha_record_init
  41. mtk_sha_record_free
  42. mtk_sha_unregister_algs
  43. mtk_sha_register_algs
  44. mtk_hash_alg_register
  45. mtk_hash_alg_release

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Cryptographic API.
   4  *
   5  * Driver for EIP97 SHA1/SHA2(HMAC) acceleration.
   6  *
   7  * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
   8  *
   9  * Some ideas are from atmel-sha.c and omap-sham.c drivers.
  10  */
  11 
  12 #include <crypto/hmac.h>
  13 #include <crypto/sha.h>
  14 #include "mtk-platform.h"
  15 
  16 #define SHA_ALIGN_MSK           (sizeof(u32) - 1)
  17 #define SHA_QUEUE_SIZE          512
  18 #define SHA_BUF_SIZE            ((u32)PAGE_SIZE)
  19 
  20 #define SHA_OP_UPDATE           1
  21 #define SHA_OP_FINAL            2
  22 
  23 #define SHA_DATA_LEN_MSK        cpu_to_le32(GENMASK(16, 0))
  24 #define SHA_MAX_DIGEST_BUF_SIZE 32
  25 
  26 /* SHA command token */
  27 #define SHA_CT_SIZE             5
  28 #define SHA_CT_CTRL_HDR         cpu_to_le32(0x02220000)
  29 #define SHA_CMD0                cpu_to_le32(0x03020000)
  30 #define SHA_CMD1                cpu_to_le32(0x21060000)
  31 #define SHA_CMD2                cpu_to_le32(0xe0e63802)
  32 
  33 /* SHA transform information */
  34 #define SHA_TFM_HASH            cpu_to_le32(0x2 << 0)
  35 #define SHA_TFM_SIZE(x)         cpu_to_le32((x) << 8)
  36 #define SHA_TFM_START           cpu_to_le32(0x1 << 4)
  37 #define SHA_TFM_CONTINUE        cpu_to_le32(0x1 << 5)
  38 #define SHA_TFM_HASH_STORE      cpu_to_le32(0x1 << 19)
  39 #define SHA_TFM_SHA1            cpu_to_le32(0x2 << 23)
  40 #define SHA_TFM_SHA256          cpu_to_le32(0x3 << 23)
  41 #define SHA_TFM_SHA224          cpu_to_le32(0x4 << 23)
  42 #define SHA_TFM_SHA512          cpu_to_le32(0x5 << 23)
  43 #define SHA_TFM_SHA384          cpu_to_le32(0x6 << 23)
  44 #define SHA_TFM_DIGEST(x)       cpu_to_le32(((x) & GENMASK(3, 0)) << 24)
  45 
  46 /* SHA flags */
  47 #define SHA_FLAGS_BUSY          BIT(0)
  48 #define SHA_FLAGS_FINAL         BIT(1)
  49 #define SHA_FLAGS_FINUP         BIT(2)
  50 #define SHA_FLAGS_SG            BIT(3)
  51 #define SHA_FLAGS_ALGO_MSK      GENMASK(8, 4)
  52 #define SHA_FLAGS_SHA1          BIT(4)
  53 #define SHA_FLAGS_SHA224        BIT(5)
  54 #define SHA_FLAGS_SHA256        BIT(6)
  55 #define SHA_FLAGS_SHA384        BIT(7)
  56 #define SHA_FLAGS_SHA512        BIT(8)
  57 #define SHA_FLAGS_HMAC          BIT(9)
  58 #define SHA_FLAGS_PAD           BIT(10)
  59 
  60 /**
  61  * struct mtk_sha_info - hardware information of SHA
  62  * @ctrl:       control tokens, hardware instruction
  63  * @cmd:        command tokens, hardware instruction
  64  * @tfm:        transform state of the hash algorithm
  65  * @digest:     buffer holding initial vectors and the resulting digest
  66  */
  67 struct mtk_sha_info {
  68         __le32 ctrl[2];
  69         __le32 cmd[3];
  70         __le32 tfm[2];
  71         __le32 digest[SHA_MAX_DIGEST_BUF_SIZE];
  72 };
  73 
  74 struct mtk_sha_reqctx {
  75         struct mtk_sha_info info;
  76         unsigned long flags;
  77         unsigned long op;
  78 
  79         u64 digcnt;
  80         size_t bufcnt;
  81         dma_addr_t dma_addr;
  82 
  83         __le32 ct_hdr;
  84         u32 ct_size;
  85         dma_addr_t ct_dma;
  86         dma_addr_t tfm_dma;
  87 
  88         /* Walk state */
  89         struct scatterlist *sg;
  90         u32 offset;     /* Offset in current sg */
  91         u32 total;      /* Total request size */
  92         size_t ds;
  93         size_t bs;
  94 
  95         u8 *buffer;
  96 };
  97 
  98 struct mtk_sha_hmac_ctx {
  99         struct crypto_shash     *shash;
 100         u8 ipad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
 101         u8 opad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
 102 };
 103 
 104 struct mtk_sha_ctx {
 105         struct mtk_cryp *cryp;
 106         unsigned long flags;
 107         u8 id;
 108         u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32));
 109 
 110         struct mtk_sha_hmac_ctx base[];
 111 };
 112 
 113 struct mtk_sha_drv {
 114         struct list_head dev_list;
 115         /* Device list lock */
 116         spinlock_t lock;
 117 };
 118 
 119 static struct mtk_sha_drv mtk_sha = {
 120         .dev_list = LIST_HEAD_INIT(mtk_sha.dev_list),
 121         .lock = __SPIN_LOCK_UNLOCKED(mtk_sha.lock),
 122 };
 123 
 124 static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
 125                                 struct ahash_request *req);
 126 
 127 static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset)
 128 {
 129         return readl_relaxed(cryp->base + offset);
 130 }
 131 
 132 static inline void mtk_sha_write(struct mtk_cryp *cryp,
 133                                  u32 offset, u32 value)
 134 {
 135         writel_relaxed(value, cryp->base + offset);
 136 }
 137 
 138 static inline void mtk_sha_ring_shift(struct mtk_ring *ring,
 139                                       struct mtk_desc **cmd_curr,
 140                                       struct mtk_desc **res_curr,
 141                                       int *count)
 142 {
 143         *cmd_curr = ring->cmd_next++;
 144         *res_curr = ring->res_next++;
 145         (*count)++;
 146 
 147         if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) {
 148                 ring->cmd_next = ring->cmd_base;
 149                 ring->res_next = ring->res_base;
 150         }
 151 }
 152 
 153 static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx)
 154 {
 155         struct mtk_cryp *cryp = NULL;
 156         struct mtk_cryp *tmp;
 157 
 158         spin_lock_bh(&mtk_sha.lock);
 159         if (!tctx->cryp) {
 160                 list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) {
 161                         cryp = tmp;
 162                         break;
 163                 }
 164                 tctx->cryp = cryp;
 165         } else {
 166                 cryp = tctx->cryp;
 167         }
 168 
 169         /*
 170          * Assign record ids to tfms in round-robin fashion; this
 171          * binds each tfm to its corresponding descriptor ring.
 172          */
 173         tctx->id = cryp->rec;
 174         cryp->rec = !cryp->rec;
 175 
 176         spin_unlock_bh(&mtk_sha.lock);
 177 
 178         return cryp;
 179 }
 180 
 181 static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx)
 182 {
 183         size_t count;
 184 
 185         while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) {
 186                 count = min(ctx->sg->length - ctx->offset, ctx->total);
 187                 count = min(count, SHA_BUF_SIZE - ctx->bufcnt);
 188 
 189                 if (!count) {
 190                         /*
 191                          * Count can only be zero here, either because the
 192                          * buffer is full or because the sg length is 0. In
 193                          * the latter case, check for another sg: a 0 length
 194                          * sg doesn't necessarily mean the end of the list.
 195                          */
 196                         if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
 197                                 ctx->sg = sg_next(ctx->sg);
 198                                 continue;
 199                         } else {
 200                                 break;
 201                         }
 202                 }
 203 
 204                 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
 205                                          ctx->offset, count, 0);
 206 
 207                 ctx->bufcnt += count;
 208                 ctx->offset += count;
 209                 ctx->total -= count;
 210 
 211                 if (ctx->offset == ctx->sg->length) {
 212                         ctx->sg = sg_next(ctx->sg);
 213                         if (ctx->sg)
 214                                 ctx->offset = 0;
 215                         else
 216                                 ctx->total = 0;
 217                 }
 218         }
 219 
 220         return 0;
 221 }
 222 
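/*
 * A minimal user-space analogue of the walk above (a sketch, not driver
 * code): data is copied from a list of (ptr, len) chunks into a fixed
 * staging buffer, with "off"/"total" tracked exactly like
 * ctx->offset/ctx->total. All names are local to this example.
 */
#include <stddef.h>
#include <string.h>

struct chunk { const unsigned char *p; size_t len; };

static void append_chunks(unsigned char *buf, size_t bufsize, size_t *bufcnt,
			  const struct chunk *c, size_t n, size_t *idx,
			  size_t *off, size_t *total)
{
	while (*bufcnt < bufsize && *total && *idx < n) {
		size_t count = c[*idx].len - *off;

		count = count < *total ? count : *total;
		count = count < bufsize - *bufcnt ? count : bufsize - *bufcnt;

		if (!count) {			/* zero-length chunk, try the next */
			(*idx)++;
			*off = 0;
			continue;
		}

		memcpy(buf + *bufcnt, c[*idx].p + *off, count);
		*bufcnt += count;
		*off += count;
		*total -= count;

		if (*off == c[*idx].len) {	/* chunk fully consumed */
			(*idx)++;
			*off = 0;
		}
	}
}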
 223 /*
 224  * The purpose of this padding is to ensure that the padded message is a
 225  * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 226  * The byte 0x80 (the bit "1" followed by seven zero bits) is appended at
 227  * the end of the message, followed by "padlen - 1" zero bytes. Then a
 228  * 64-bit block (SHA1/SHA224/SHA256) or a 128-bit block (SHA384/SHA512)
 229  * equal to the message length in bits is appended.
 230  *
 231  * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 232  *  - if message length < 56 bytes then padlen = 56 - message length
 233  *  - else padlen = 64 + 56 - message length
 234  *
 235  * For SHA384/SHA512, padlen is calculated as follows:
 236  *  - if message length < 112 bytes then padlen = 112 - message length
 237  *  - else padlen = 128 + 112 - message length
 238  */
 239 static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len)
 240 {
 241         u32 index, padlen;
 242         u64 bits[2];
 243         u64 size = ctx->digcnt;
 244 
 245         size += ctx->bufcnt;
 246         size += len;
 247 
 248         bits[1] = cpu_to_be64(size << 3);
 249         bits[0] = cpu_to_be64(size >> 61);
 250 
 251         switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
 252         case SHA_FLAGS_SHA384:
 253         case SHA_FLAGS_SHA512:
 254                 index = ctx->bufcnt & 0x7f;
 255                 padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
 256                 *(ctx->buffer + ctx->bufcnt) = 0x80;
 257                 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
 258                 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
 259                 ctx->bufcnt += padlen + 16;
 260                 ctx->flags |= SHA_FLAGS_PAD;
 261                 break;
 262 
 263         default:
 264                 index = ctx->bufcnt & 0x3f;
 265                 padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
 266                 *(ctx->buffer + ctx->bufcnt) = 0x80;
 267                 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
 268                 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
 269                 ctx->bufcnt += padlen + 8;
 270                 ctx->flags |= SHA_FLAGS_PAD;
 271                 break;
 272         }
 273 }
 274 
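/*
 * A quick self-check of the padlen rules above (a user-space sketch,
 * not driver code): for any count of bytes hashed so far, message +
 * padding + length field must fill whole blocks.
 */
#include <assert.h>
#include <stddef.h>

static size_t sha_padlen(size_t msglen, int is_sha384_512)
{
	size_t bs = is_sha384_512 ? 128 : 64;	  /* block size, bytes */
	size_t lenfield = is_sha384_512 ? 16 : 8; /* length block, bytes */
	size_t boundary = bs - lenfield;	  /* 56 or 112 */
	size_t index = msglen & (bs - 1);
	size_t padlen = (index < boundary) ? boundary - index
					   : bs + boundary - index;

	/* message + 0x80/zero padding + length block fills whole blocks */
	assert((msglen + padlen + lenfield) % bs == 0);
	return padlen;
}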
 275 /* Initialize basic transform information of SHA */
 276 static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx)
 277 {
 278         struct mtk_sha_info *info = &ctx->info;
 279 
 280         ctx->ct_hdr = SHA_CT_CTRL_HDR;
 281         ctx->ct_size = SHA_CT_SIZE;
 282 
 283         info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds));
 284 
 285         switch (ctx->flags & SHA_FLAGS_ALGO_MSK) {
 286         case SHA_FLAGS_SHA1:
 287                 info->tfm[0] |= SHA_TFM_SHA1;
 288                 break;
 289         case SHA_FLAGS_SHA224:
 290                 info->tfm[0] |= SHA_TFM_SHA224;
 291                 break;
 292         case SHA_FLAGS_SHA256:
 293                 info->tfm[0] |= SHA_TFM_SHA256;
 294                 break;
 295         case SHA_FLAGS_SHA384:
 296                 info->tfm[0] |= SHA_TFM_SHA384;
 297                 break;
 298         case SHA_FLAGS_SHA512:
 299                 info->tfm[0] |= SHA_TFM_SHA512;
 300                 break;
 301 
 302         default:
 303                 /* Should not happen... */
 304                 return;
 305         }
 306 
 307         info->tfm[1] = SHA_TFM_HASH_STORE;
 308         info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START;
 309         info->ctrl[1] = info->tfm[1];
 310 
 311         info->cmd[0] = SHA_CMD0;
 312         info->cmd[1] = SHA_CMD1;
 313         info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds));
 314 }
 315 
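/*
 * Worked example of the composition above (values shown host-endian for
 * readability; the driver stores them as __le32). For the first SHA-256
 * block, ctx->ds is 32 bytes, i.e. 8 words, so:
 *
 *	SHA_TFM_HASH			0x00000002
 *	SHA_TFM_SIZE(8)			0x00000800
 *	SHA_TFM_SHA256			0x01800000
 *	SHA_TFM_CONTINUE		0x00000020
 *	SHA_TFM_START			0x00000010
 *	--------------------------------------
 *	info->ctrl[0]			0x01800832
 *
 * On later blocks mtk_sha_info_update() clears SHA_TFM_START again.
 */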
 316 /*
 317  * Update input data length field of transform information and
 318  * map it to DMA region.
 319  */
 320 static int mtk_sha_info_update(struct mtk_cryp *cryp,
 321                                struct mtk_sha_rec *sha,
 322                                size_t len1, size_t len2)
 323 {
 324         struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 325         struct mtk_sha_info *info = &ctx->info;
 326 
 327         ctx->ct_hdr &= ~SHA_DATA_LEN_MSK;
 328         ctx->ct_hdr |= cpu_to_le32(len1 + len2);
 329         info->cmd[0] &= ~SHA_DATA_LEN_MSK;
 330         info->cmd[0] |= cpu_to_le32(len1 + len2);
 331 
 332         /* Setting SHA_TFM_START only for the first iteration */
 333         if (ctx->digcnt)
 334                 info->ctrl[0] &= ~SHA_TFM_START;
 335 
 336         ctx->digcnt += len1;
 337 
 338         ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
 339                                      DMA_BIDIRECTIONAL);
 340         if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) {
 341                 dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info));
 342                 return -EINVAL;
 343         }
 344 
 345         ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd);
 346 
 347         return 0;
 348 }
 349 
 350 /*
 351  * Because of a hardware limitation, the inner and outer digests
 352  * would each have to be pre-calculated by the engine before being
 353  * applied to the input message. This complex procedure limits HMAC
 354  * performance, so the outer hash is done with a software fallback.
 355  */
 356 static int mtk_sha_finish_hmac(struct ahash_request *req)
 357 {
 358         struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 359         struct mtk_sha_hmac_ctx *bctx = tctx->base;
 360         struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 361 
 362         SHASH_DESC_ON_STACK(shash, bctx->shash);
 363 
 364         shash->tfm = bctx->shash;
 365 
 366         return crypto_shash_init(shash) ?:
 367                crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
 368                crypto_shash_finup(shash, req->result, ctx->ds, req->result);
 369 }
 370 
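/*
 * A schematic user-space view of the fallback above, assuming a
 * hypothetical one-shot sha_digest(in, inlen, out) helper: the engine
 * leaves the inner digest H(ipad-key || msg) in req->result, and
 * software finishes HMAC by hashing the opad block followed by that
 * inner digest. This is a sketch, not driver code.
 */
#include <string.h>

static void hmac_finish_sw(const unsigned char *opad, size_t bs,
			   unsigned char *result, size_t ds,
			   void (*sha_digest)(const unsigned char *,
					      size_t, unsigned char *))
{
	unsigned char buf[128 + 64];		/* max block + max digest */

	memcpy(buf, opad, bs);			/* opad-masked key block */
	memcpy(buf + bs, result, ds);		/* inner digest from engine */
	sha_digest(buf, bs + ds, result);	/* HMAC = H(opad || inner) */
}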
 371 /* Initialize request context */
 372 static int mtk_sha_init(struct ahash_request *req)
 373 {
 374         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 375         struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
 376         struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 377 
 378         ctx->flags = 0;
 379         ctx->ds = crypto_ahash_digestsize(tfm);
 380 
 381         switch (ctx->ds) {
 382         case SHA1_DIGEST_SIZE:
 383                 ctx->flags |= SHA_FLAGS_SHA1;
 384                 ctx->bs = SHA1_BLOCK_SIZE;
 385                 break;
 386         case SHA224_DIGEST_SIZE:
 387                 ctx->flags |= SHA_FLAGS_SHA224;
 388                 ctx->bs = SHA224_BLOCK_SIZE;
 389                 break;
 390         case SHA256_DIGEST_SIZE:
 391                 ctx->flags |= SHA_FLAGS_SHA256;
 392                 ctx->bs = SHA256_BLOCK_SIZE;
 393                 break;
 394         case SHA384_DIGEST_SIZE:
 395                 ctx->flags |= SHA_FLAGS_SHA384;
 396                 ctx->bs = SHA384_BLOCK_SIZE;
 397                 break;
 398         case SHA512_DIGEST_SIZE:
 399                 ctx->flags |= SHA_FLAGS_SHA512;
 400                 ctx->bs = SHA512_BLOCK_SIZE;
 401                 break;
 402         default:
 403                 return -EINVAL;
 404         }
 405 
 406         ctx->bufcnt = 0;
 407         ctx->digcnt = 0;
 408         ctx->buffer = tctx->buf;
 409 
 410         if (tctx->flags & SHA_FLAGS_HMAC) {
 411                 struct mtk_sha_hmac_ctx *bctx = tctx->base;
 412 
 413                 memcpy(ctx->buffer, bctx->ipad, ctx->bs);
 414                 ctx->bufcnt = ctx->bs;
 415                 ctx->flags |= SHA_FLAGS_HMAC;
 416         }
 417 
 418         return 0;
 419 }
 420 
 421 static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha,
 422                         dma_addr_t addr1, size_t len1,
 423                         dma_addr_t addr2, size_t len2)
 424 {
 425         struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 426         struct mtk_ring *ring = cryp->ring[sha->id];
 427         struct mtk_desc *cmd, *res;
 428         int err, count = 0;
 429 
 430         err = mtk_sha_info_update(cryp, sha, len1, len2);
 431         if (err)
 432                 return err;
 433 
 434         /* Fill in the command/result descriptors */
 435         mtk_sha_ring_shift(ring, &cmd, &res, &count);
 436 
 437         res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1);
 438         cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) |
 439                    MTK_DESC_CT_LEN(ctx->ct_size);
 440         cmd->buf = cpu_to_le32(addr1);
 441         cmd->ct = cpu_to_le32(ctx->ct_dma);
 442         cmd->ct_hdr = ctx->ct_hdr;
 443         cmd->tfm = cpu_to_le32(ctx->tfm_dma);
 444 
 445         if (len2) {
 446                 mtk_sha_ring_shift(ring, &cmd, &res, &count);
 447 
 448                 res->hdr = MTK_DESC_BUF_LEN(len2);
 449                 cmd->hdr = MTK_DESC_BUF_LEN(len2);
 450                 cmd->buf = cpu_to_le32(addr2);
 451         }
 452 
 453         cmd->hdr |= MTK_DESC_LAST;
 454         res->hdr |= MTK_DESC_LAST;
 455 
 456         /*
 457          * Make sure that all changes to the DMA ring are done before we
 458          * start engine.
 459          */
 460         wmb();
 461         /* Start DMA transfer */
 462         mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
 463         mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count));
 464 
 465         return -EINPROGRESS;
 466 }
 467 
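/*
 * The wmb()/doorbell pairing above is the usual DMA-ring pattern: every
 * descriptor store must be visible to the device before the MMIO write
 * that announces new descriptors. Schematically:
 *
 *	fill cmd/res descriptors in ring memory;
 *	wmb();                        <-- order stores vs. doorbell
 *	write CDR/RDR_PREP_COUNT;     <-- device may now fetch them
 */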
 468 static int mtk_sha_dma_map(struct mtk_cryp *cryp,
 469                            struct mtk_sha_rec *sha,
 470                            struct mtk_sha_reqctx *ctx,
 471                            size_t count)
 472 {
 473         ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
 474                                        SHA_BUF_SIZE, DMA_TO_DEVICE);
 475         if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
 476                 dev_err(cryp->dev, "dma map error\n");
 477                 return -EINVAL;
 478         }
 479 
 480         ctx->flags &= ~SHA_FLAGS_SG;
 481 
 482         return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0);
 483 }
 484 
 485 static int mtk_sha_update_slow(struct mtk_cryp *cryp,
 486                                struct mtk_sha_rec *sha)
 487 {
 488         struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 489         size_t count;
 490         u32 final;
 491 
 492         mtk_sha_append_sg(ctx);
 493 
 494         final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
 495 
 496         dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt);
 497 
 498         if (final) {
 499                 sha->flags |= SHA_FLAGS_FINAL;
 500                 mtk_sha_fill_padding(ctx, 0);
 501         }
 502 
 503         if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) {
 504                 count = ctx->bufcnt;
 505                 ctx->bufcnt = 0;
 506 
 507                 return mtk_sha_dma_map(cryp, sha, ctx, count);
 508         }
 509         return 0;
 510 }
 511 
 512 static int mtk_sha_update_start(struct mtk_cryp *cryp,
 513                                 struct mtk_sha_rec *sha)
 514 {
 515         struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 516         u32 len, final, tail;
 517         struct scatterlist *sg;
 518 
 519         if (!ctx->total)
 520                 return 0;
 521 
 522         if (ctx->bufcnt || ctx->offset)
 523                 return mtk_sha_update_slow(cryp, sha);
 524 
 525         sg = ctx->sg;
 526 
 527         if (!IS_ALIGNED(sg->offset, sizeof(u32)))
 528                 return mtk_sha_update_slow(cryp, sha);
 529 
 530         if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs))
 531                 /* size is not ctx->bs aligned */
 532                 return mtk_sha_update_slow(cryp, sha);
 533 
 534         len = min(ctx->total, sg->length);
 535 
 536         if (sg_is_last(sg)) {
 537                 if (!(ctx->flags & SHA_FLAGS_FINUP)) {
 538                         /* not last sg must be ctx->bs aligned */
 539                         tail = len & (ctx->bs - 1);
 540                         len -= tail;
 541                 }
 542         }
 543 
 544         ctx->total -= len;
 545         ctx->offset = len; /* offset where to start slow */
 546 
 547         final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
 548 
 549         /* Add padding */
 550         if (final) {
 551                 size_t count;
 552 
 553                 tail = len & (ctx->bs - 1);
 554                 len -= tail;
 555                 ctx->total += tail;
 556                 ctx->offset = len; /* offset where to start slow */
 557 
 558                 sg = ctx->sg;
 559                 mtk_sha_append_sg(ctx);
 560                 mtk_sha_fill_padding(ctx, len);
 561 
 562                 ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer,
 563                                                SHA_BUF_SIZE, DMA_TO_DEVICE);
 564                 if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) {
 565                         dev_err(cryp->dev, "dma map bytes error\n");
 566                         return -EINVAL;
 567                 }
 568 
 569                 sha->flags |= SHA_FLAGS_FINAL;
 570                 count = ctx->bufcnt;
 571                 ctx->bufcnt = 0;
 572 
 573                 if (len == 0) {
 574                         ctx->flags &= ~SHA_FLAGS_SG;
 575                         return mtk_sha_xmit(cryp, sha, ctx->dma_addr,
 576                                             count, 0, 0);
 577 
 578                 } else {
 579                         ctx->sg = sg;
 580                         if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
 581                                 dev_err(cryp->dev, "dma_map_sg error\n");
 582                                 return -EINVAL;
 583                         }
 584 
 585                         ctx->flags |= SHA_FLAGS_SG;
 586                         return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
 587                                             len, ctx->dma_addr, count);
 588                 }
 589         }
 590 
 591         if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
 592                 dev_err(cryp->dev, "dma_map_sg error\n");
 593                 return -EINVAL;
 594         }
 595 
 596         ctx->flags |= SHA_FLAGS_SG;
 597 
 598         return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg),
 599                             len, 0, 0);
 600 }
 601 
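/*
 * Path selection in mtk_sha_update_start(), summarized: the slow
 * (bounce-buffer) path is taken when data is already buffered, the sg
 * offset is not 32-bit aligned, or a non-final sg is not a multiple of
 * the block size. Otherwise the sg is DMA-mapped and hashed in place,
 * with the final block's padding sent as a second descriptor out of
 * the bounce buffer.
 */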
 602 static int mtk_sha_final_req(struct mtk_cryp *cryp,
 603                              struct mtk_sha_rec *sha)
 604 {
 605         struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 606         size_t count;
 607 
 608         mtk_sha_fill_padding(ctx, 0);
 609 
 610         sha->flags |= SHA_FLAGS_FINAL;
 611         count = ctx->bufcnt;
 612         ctx->bufcnt = 0;
 613 
 614         return mtk_sha_dma_map(cryp, sha, ctx, count);
 615 }
 616 
 617 /* Copy ready hash (+ finalize hmac) */
 618 static int mtk_sha_finish(struct ahash_request *req)
 619 {
 620         struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 621         __le32 *digest = ctx->info.digest;
 622         u32 *result = (u32 *)req->result;
 623         int i;
 624 
 625         /* Get the hash from the digest buffer */
 626         for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++)
 627                 result[i] = le32_to_cpu(digest[i]);
 628 
 629         if (ctx->flags & SHA_FLAGS_HMAC)
 630                 return mtk_sha_finish_hmac(req);
 631 
 632         return 0;
 633 }
 634 
 635 static void mtk_sha_finish_req(struct mtk_cryp *cryp,
 636                                struct mtk_sha_rec *sha,
 637                                int err)
 638 {
 639         if (likely(!err && (SHA_FLAGS_FINAL & sha->flags)))
 640                 err = mtk_sha_finish(sha->req);
 641 
 642         sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL);
 643 
 644         sha->req->base.complete(&sha->req->base, err);
 645 
 646         /* Handle new request */
 647         tasklet_schedule(&sha->queue_task);
 648 }
 649 
 650 static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id,
 651                                 struct ahash_request *req)
 652 {
 653         struct mtk_sha_rec *sha = cryp->sha[id];
 654         struct crypto_async_request *async_req, *backlog;
 655         struct mtk_sha_reqctx *ctx;
 656         unsigned long flags;
 657         int err = 0, ret = 0;
 658 
 659         spin_lock_irqsave(&sha->lock, flags);
 660         if (req)
 661                 ret = ahash_enqueue_request(&sha->queue, req);
 662 
 663         if (SHA_FLAGS_BUSY & sha->flags) {
 664                 spin_unlock_irqrestore(&sha->lock, flags);
 665                 return ret;
 666         }
 667 
 668         backlog = crypto_get_backlog(&sha->queue);
 669         async_req = crypto_dequeue_request(&sha->queue);
 670         if (async_req)
 671                 sha->flags |= SHA_FLAGS_BUSY;
 672         spin_unlock_irqrestore(&sha->lock, flags);
 673 
 674         if (!async_req)
 675                 return ret;
 676 
 677         if (backlog)
 678                 backlog->complete(backlog, -EINPROGRESS);
 679 
 680         req = ahash_request_cast(async_req);
 681         ctx = ahash_request_ctx(req);
 682 
 683         sha->req = req;
 684 
 685         mtk_sha_info_init(ctx);
 686 
 687         if (ctx->op == SHA_OP_UPDATE) {
 688                 err = mtk_sha_update_start(cryp, sha);
 689                 if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP))
 690                         /* No final() after finup() */
 691                         err = mtk_sha_final_req(cryp, sha);
 692         } else if (ctx->op == SHA_OP_FINAL) {
 693                 err = mtk_sha_final_req(cryp, sha);
 694         }
 695 
 696         if (unlikely(err != -EINPROGRESS))
 697                 /* Task will not finish it, so do it here */
 698                 mtk_sha_finish_req(cryp, sha, err);
 699 
 700         return ret;
 701 }
 702 
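/*
 * The function above follows the standard crypto_queue pattern.
 * Schematically (locking and error paths trimmed):
 *
 *	lock; enqueue new request;
 *	if (engine busy) { unlock; return; }
 *	backlog = crypto_get_backlog(&queue);
 *	req = crypto_dequeue_request(&queue);
 *	if (req) mark engine busy;
 *	unlock;
 *	if (backlog) backlog->complete(backlog, -EINPROGRESS);
 *	process req;
 *
 * The -EINPROGRESS completion tells a backlogged caller that its
 * request moved from the backlog into the hardware queue.
 */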
 703 static int mtk_sha_enqueue(struct ahash_request *req, u32 op)
 704 {
 705         struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 706         struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 707 
 708         ctx->op = op;
 709 
 710         return mtk_sha_handle_queue(tctx->cryp, tctx->id, req);
 711 }
 712 
 713 static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha)
 714 {
 715         struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req);
 716 
 717         dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
 718                          DMA_BIDIRECTIONAL);
 719 
 720         if (ctx->flags & SHA_FLAGS_SG) {
 721                 dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE);
 722                 if (ctx->sg->length == ctx->offset) {
 723                         ctx->sg = sg_next(ctx->sg);
 724                         if (ctx->sg)
 725                                 ctx->offset = 0;
 726                 }
 727                 if (ctx->flags & SHA_FLAGS_PAD) {
 728                         dma_unmap_single(cryp->dev, ctx->dma_addr,
 729                                          SHA_BUF_SIZE, DMA_TO_DEVICE);
 730                 }
 731         } else
 732                 dma_unmap_single(cryp->dev, ctx->dma_addr,
 733                                  SHA_BUF_SIZE, DMA_TO_DEVICE);
 734 }
 735 
 736 static void mtk_sha_complete(struct mtk_cryp *cryp,
 737                              struct mtk_sha_rec *sha)
 738 {
 739         int err = 0;
 740 
 741         err = mtk_sha_update_start(cryp, sha);
 742         if (err != -EINPROGRESS)
 743                 mtk_sha_finish_req(cryp, sha, err);
 744 }
 745 
 746 static int mtk_sha_update(struct ahash_request *req)
 747 {
 748         struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 749 
 750         ctx->total = req->nbytes;
 751         ctx->sg = req->src;
 752         ctx->offset = 0;
 753 
 754         if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) &&
 755             !(ctx->flags & SHA_FLAGS_FINUP))
 756                 return mtk_sha_append_sg(ctx);
 757 
 758         return mtk_sha_enqueue(req, SHA_OP_UPDATE);
 759 }
 760 
 761 static int mtk_sha_final(struct ahash_request *req)
 762 {
 763         struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 764 
 765         ctx->flags |= SHA_FLAGS_FINUP;
 766 
 767         if (ctx->flags & SHA_FLAGS_PAD)
 768                 return mtk_sha_finish(req);
 769 
 770         return mtk_sha_enqueue(req, SHA_OP_FINAL);
 771 }
 772 
 773 static int mtk_sha_finup(struct ahash_request *req)
 774 {
 775         struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 776         int err1, err2;
 777 
 778         ctx->flags |= SHA_FLAGS_FINUP;
 779 
 780         err1 = mtk_sha_update(req);
 781         if (err1 == -EINPROGRESS ||
 782             (err1 == -EBUSY && (ahash_request_flags(req) &
 783                                 CRYPTO_TFM_REQ_MAY_BACKLOG)))
 784                 return err1;
 785         /*
 786          * final() always has to be called to clean up resources,
 787          * even if update() failed
 788          */
 789         err2 = mtk_sha_final(req);
 790 
 791         return err1 ?: err2;
 792 }
 793 
 794 static int mtk_sha_digest(struct ahash_request *req)
 795 {
 796         return mtk_sha_init(req) ?: mtk_sha_finup(req);
 797 }
 798 
 799 static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
 800                           u32 keylen)
 801 {
 802         struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm);
 803         struct mtk_sha_hmac_ctx *bctx = tctx->base;
 804         size_t bs = crypto_shash_blocksize(bctx->shash);
 805         size_t ds = crypto_shash_digestsize(bctx->shash);
 806         int err, i;
 807 
 808         SHASH_DESC_ON_STACK(shash, bctx->shash);
 809 
 810         shash->tfm = bctx->shash;
 811 
 812         if (keylen > bs) {
 813                 err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
 814                 if (err)
 815                         return err;
 816                 keylen = ds;
 817         } else {
 818                 memcpy(bctx->ipad, key, keylen);
 819         }
 820 
 821         memset(bctx->ipad + keylen, 0, bs - keylen);
 822         memcpy(bctx->opad, bctx->ipad, bs);
 823 
 824         for (i = 0; i < bs; i++) {
 825                 bctx->ipad[i] ^= HMAC_IPAD_VALUE;
 826                 bctx->opad[i] ^= HMAC_OPAD_VALUE;
 827         }
 828 
 829         return 0;
 830 }
 831 
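/*
 * Standalone sketch of the ipad/opad preparation done in
 * mtk_sha_setkey() (RFC 2104): the key is zero-padded to one block and
 * XORed with the two HMAC constants. Keys longer than a block would
 * first be replaced by their digest, which is omitted here; this
 * sketch assumes keylen <= bs and is not driver code.
 */
#include <string.h>

#define EX_HMAC_IPAD	0x36	/* HMAC_IPAD_VALUE */
#define EX_HMAC_OPAD	0x5c	/* HMAC_OPAD_VALUE */

static void hmac_pads(const unsigned char *key, size_t keylen,
		      unsigned char *ipad, unsigned char *opad, size_t bs)
{
	size_t i;

	memcpy(ipad, key, keylen);
	memset(ipad + keylen, 0, bs - keylen);	/* zero pad to block size */
	memcpy(opad, ipad, bs);

	for (i = 0; i < bs; i++) {
		ipad[i] ^= EX_HMAC_IPAD;
		opad[i] ^= EX_HMAC_OPAD;
	}
}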
 832 static int mtk_sha_export(struct ahash_request *req, void *out)
 833 {
 834         const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 835 
 836         memcpy(out, ctx, sizeof(*ctx));
 837         return 0;
 838 }
 839 
 840 static int mtk_sha_import(struct ahash_request *req, const void *in)
 841 {
 842         struct mtk_sha_reqctx *ctx = ahash_request_ctx(req);
 843 
 844         memcpy(ctx, in, sizeof(*ctx));
 845         return 0;
 846 }
 847 
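/*
 * Since the exported state is the whole request context, a partial hash
 * can be suspended and resumed on the same tfm. A hedged kernel-style
 * usage sketch (error handling omitted):
 *
 *	char state[sizeof(struct mtk_sha_reqctx)];
 *
 *	crypto_ahash_update(req1);		- hash the first part
 *	crypto_ahash_export(req1, state);	- snapshot the state
 *	...
 *	crypto_ahash_import(req2, state);	- resume later
 *	crypto_ahash_final(req2);
 */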
 848 static int mtk_sha_cra_init_alg(struct crypto_tfm *tfm,
 849                                 const char *alg_base)
 850 {
 851         struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
 852         struct mtk_cryp *cryp = NULL;
 853 
 854         cryp = mtk_sha_find_dev(tctx);
 855         if (!cryp)
 856                 return -ENODEV;
 857 
 858         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 859                                  sizeof(struct mtk_sha_reqctx));
 860 
 861         if (alg_base) {
 862                 struct mtk_sha_hmac_ctx *bctx = tctx->base;
 863 
 864                 tctx->flags |= SHA_FLAGS_HMAC;
 865                 bctx->shash = crypto_alloc_shash(alg_base, 0,
 866                                         CRYPTO_ALG_NEED_FALLBACK);
 867                 if (IS_ERR(bctx->shash)) {
 868                         pr_err("base driver %s could not be loaded.\n",
 869                                alg_base);
 870 
 871                         return PTR_ERR(bctx->shash);
 872                 }
 873         }
 874         return 0;
 875 }
 876 
 877 static int mtk_sha_cra_init(struct crypto_tfm *tfm)
 878 {
 879         return mtk_sha_cra_init_alg(tfm, NULL);
 880 }
 881 
 882 static int mtk_sha_cra_sha1_init(struct crypto_tfm *tfm)
 883 {
 884         return mtk_sha_cra_init_alg(tfm, "sha1");
 885 }
 886 
 887 static int mtk_sha_cra_sha224_init(struct crypto_tfm *tfm)
 888 {
 889         return mtk_sha_cra_init_alg(tfm, "sha224");
 890 }
 891 
 892 static int mtk_sha_cra_sha256_init(struct crypto_tfm *tfm)
 893 {
 894         return mtk_sha_cra_init_alg(tfm, "sha256");
 895 }
 896 
 897 static int mtk_sha_cra_sha384_init(struct crypto_tfm *tfm)
 898 {
 899         return mtk_sha_cra_init_alg(tfm, "sha384");
 900 }
 901 
 902 static int mtk_sha_cra_sha512_init(struct crypto_tfm *tfm)
 903 {
 904         return mtk_sha_cra_init_alg(tfm, "sha512");
 905 }
 906 
 907 static void mtk_sha_cra_exit(struct crypto_tfm *tfm)
 908 {
 909         struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm);
 910 
 911         if (tctx->flags & SHA_FLAGS_HMAC) {
 912                 struct mtk_sha_hmac_ctx *bctx = tctx->base;
 913 
 914                 crypto_free_shash(bctx->shash);
 915         }
 916 }
 917 
 918 static struct ahash_alg algs_sha1_sha224_sha256[] = {
 919 {
 920         .init           = mtk_sha_init,
 921         .update         = mtk_sha_update,
 922         .final          = mtk_sha_final,
 923         .finup          = mtk_sha_finup,
 924         .digest         = mtk_sha_digest,
 925         .export         = mtk_sha_export,
 926         .import         = mtk_sha_import,
 927         .halg.digestsize        = SHA1_DIGEST_SIZE,
 928         .halg.statesize = sizeof(struct mtk_sha_reqctx),
 929         .halg.base      = {
 930                 .cra_name               = "sha1",
 931                 .cra_driver_name        = "mtk-sha1",
 932                 .cra_priority           = 400,
 933                 .cra_flags              = CRYPTO_ALG_ASYNC,
 934                 .cra_blocksize          = SHA1_BLOCK_SIZE,
 935                 .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
 936                 .cra_alignmask          = SHA_ALIGN_MSK,
 937                 .cra_module             = THIS_MODULE,
 938                 .cra_init               = mtk_sha_cra_init,
 939                 .cra_exit               = mtk_sha_cra_exit,
 940         }
 941 },
 942 {
 943         .init           = mtk_sha_init,
 944         .update         = mtk_sha_update,
 945         .final          = mtk_sha_final,
 946         .finup          = mtk_sha_finup,
 947         .digest         = mtk_sha_digest,
 948         .export         = mtk_sha_export,
 949         .import         = mtk_sha_import,
 950         .halg.digestsize        = SHA224_DIGEST_SIZE,
 951         .halg.statesize = sizeof(struct mtk_sha_reqctx),
 952         .halg.base      = {
 953                 .cra_name               = "sha224",
 954                 .cra_driver_name        = "mtk-sha224",
 955                 .cra_priority           = 400,
 956                 .cra_flags              = CRYPTO_ALG_ASYNC,
 957                 .cra_blocksize          = SHA224_BLOCK_SIZE,
 958                 .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
 959                 .cra_alignmask          = SHA_ALIGN_MSK,
 960                 .cra_module             = THIS_MODULE,
 961                 .cra_init               = mtk_sha_cra_init,
 962                 .cra_exit               = mtk_sha_cra_exit,
 963         }
 964 },
 965 {
 966         .init           = mtk_sha_init,
 967         .update         = mtk_sha_update,
 968         .final          = mtk_sha_final,
 969         .finup          = mtk_sha_finup,
 970         .digest         = mtk_sha_digest,
 971         .export         = mtk_sha_export,
 972         .import         = mtk_sha_import,
 973         .halg.digestsize        = SHA256_DIGEST_SIZE,
 974         .halg.statesize = sizeof(struct mtk_sha_reqctx),
 975         .halg.base      = {
 976                 .cra_name               = "sha256",
 977                 .cra_driver_name        = "mtk-sha256",
 978                 .cra_priority           = 400,
 979                 .cra_flags              = CRYPTO_ALG_ASYNC,
 980                 .cra_blocksize          = SHA256_BLOCK_SIZE,
 981                 .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
 982                 .cra_alignmask          = SHA_ALIGN_MSK,
 983                 .cra_module             = THIS_MODULE,
 984                 .cra_init               = mtk_sha_cra_init,
 985                 .cra_exit               = mtk_sha_cra_exit,
 986         }
 987 },
 988 {
 989         .init           = mtk_sha_init,
 990         .update         = mtk_sha_update,
 991         .final          = mtk_sha_final,
 992         .finup          = mtk_sha_finup,
 993         .digest         = mtk_sha_digest,
 994         .export         = mtk_sha_export,
 995         .import         = mtk_sha_import,
 996         .setkey         = mtk_sha_setkey,
 997         .halg.digestsize        = SHA1_DIGEST_SIZE,
 998         .halg.statesize = sizeof(struct mtk_sha_reqctx),
 999         .halg.base      = {
1000                 .cra_name               = "hmac(sha1)",
1001                 .cra_driver_name        = "mtk-hmac-sha1",
1002                 .cra_priority           = 400,
1003                 .cra_flags              = CRYPTO_ALG_ASYNC |
1004                                           CRYPTO_ALG_NEED_FALLBACK,
1005                 .cra_blocksize          = SHA1_BLOCK_SIZE,
1006                 .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
1007                                         sizeof(struct mtk_sha_hmac_ctx),
1008                 .cra_alignmask          = SHA_ALIGN_MSK,
1009                 .cra_module             = THIS_MODULE,
1010                 .cra_init               = mtk_sha_cra_sha1_init,
1011                 .cra_exit               = mtk_sha_cra_exit,
1012         }
1013 },
1014 {
1015         .init           = mtk_sha_init,
1016         .update         = mtk_sha_update,
1017         .final          = mtk_sha_final,
1018         .finup          = mtk_sha_finup,
1019         .digest         = mtk_sha_digest,
1020         .export         = mtk_sha_export,
1021         .import         = mtk_sha_import,
1022         .setkey         = mtk_sha_setkey,
1023         .halg.digestsize        = SHA224_DIGEST_SIZE,
1024         .halg.statesize = sizeof(struct mtk_sha_reqctx),
1025         .halg.base      = {
1026                 .cra_name               = "hmac(sha224)",
1027                 .cra_driver_name        = "mtk-hmac-sha224",
1028                 .cra_priority           = 400,
1029                 .cra_flags              = CRYPTO_ALG_ASYNC |
1030                                           CRYPTO_ALG_NEED_FALLBACK,
1031                 .cra_blocksize          = SHA224_BLOCK_SIZE,
1032                 .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
1033                                         sizeof(struct mtk_sha_hmac_ctx),
1034                 .cra_alignmask          = SHA_ALIGN_MSK,
1035                 .cra_module             = THIS_MODULE,
1036                 .cra_init               = mtk_sha_cra_sha224_init,
1037                 .cra_exit               = mtk_sha_cra_exit,
1038         }
1039 },
1040 {
1041         .init           = mtk_sha_init,
1042         .update         = mtk_sha_update,
1043         .final          = mtk_sha_final,
1044         .finup          = mtk_sha_finup,
1045         .digest         = mtk_sha_digest,
1046         .export         = mtk_sha_export,
1047         .import         = mtk_sha_import,
1048         .setkey         = mtk_sha_setkey,
1049         .halg.digestsize        = SHA256_DIGEST_SIZE,
1050         .halg.statesize = sizeof(struct mtk_sha_reqctx),
1051         .halg.base      = {
1052                 .cra_name               = "hmac(sha256)",
1053                 .cra_driver_name        = "mtk-hmac-sha256",
1054                 .cra_priority           = 400,
1055                 .cra_flags              = CRYPTO_ALG_ASYNC |
1056                                           CRYPTO_ALG_NEED_FALLBACK,
1057                 .cra_blocksize          = SHA256_BLOCK_SIZE,
1058                 .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
1059                                         sizeof(struct mtk_sha_hmac_ctx),
1060                 .cra_alignmask          = SHA_ALIGN_MSK,
1061                 .cra_module             = THIS_MODULE,
1062                 .cra_init               = mtk_sha_cra_sha256_init,
1063                 .cra_exit               = mtk_sha_cra_exit,
1064         }
1065 },
1066 };
1067 
1068 static struct ahash_alg algs_sha384_sha512[] = {
1069 {
1070         .init           = mtk_sha_init,
1071         .update         = mtk_sha_update,
1072         .final          = mtk_sha_final,
1073         .finup          = mtk_sha_finup,
1074         .digest         = mtk_sha_digest,
1075         .export         = mtk_sha_export,
1076         .import         = mtk_sha_import,
1077         .halg.digestsize        = SHA384_DIGEST_SIZE,
1078         .halg.statesize = sizeof(struct mtk_sha_reqctx),
1079         .halg.base      = {
1080                 .cra_name               = "sha384",
1081                 .cra_driver_name        = "mtk-sha384",
1082                 .cra_priority           = 400,
1083                 .cra_flags              = CRYPTO_ALG_ASYNC,
1084                 .cra_blocksize          = SHA384_BLOCK_SIZE,
1085                 .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
1086                 .cra_alignmask          = SHA_ALIGN_MSK,
1087                 .cra_module             = THIS_MODULE,
1088                 .cra_init               = mtk_sha_cra_init,
1089                 .cra_exit               = mtk_sha_cra_exit,
1090         }
1091 },
1092 {
1093         .init           = mtk_sha_init,
1094         .update         = mtk_sha_update,
1095         .final          = mtk_sha_final,
1096         .finup          = mtk_sha_finup,
1097         .digest         = mtk_sha_digest,
1098         .export         = mtk_sha_export,
1099         .import         = mtk_sha_import,
1100         .halg.digestsize        = SHA512_DIGEST_SIZE,
1101         .halg.statesize = sizeof(struct mtk_sha_reqctx),
1102         .halg.base      = {
1103                 .cra_name               = "sha512",
1104                 .cra_driver_name        = "mtk-sha512",
1105                 .cra_priority           = 400,
1106                 .cra_flags              = CRYPTO_ALG_ASYNC,
1107                 .cra_blocksize          = SHA512_BLOCK_SIZE,
1108                 .cra_ctxsize            = sizeof(struct mtk_sha_ctx),
1109                 .cra_alignmask          = SHA_ALIGN_MSK,
1110                 .cra_module             = THIS_MODULE,
1111                 .cra_init               = mtk_sha_cra_init,
1112                 .cra_exit               = mtk_sha_cra_exit,
1113         }
1114 },
1115 {
1116         .init           = mtk_sha_init,
1117         .update         = mtk_sha_update,
1118         .final          = mtk_sha_final,
1119         .finup          = mtk_sha_finup,
1120         .digest         = mtk_sha_digest,
1121         .export         = mtk_sha_export,
1122         .import         = mtk_sha_import,
1123         .setkey         = mtk_sha_setkey,
1124         .halg.digestsize        = SHA384_DIGEST_SIZE,
1125         .halg.statesize = sizeof(struct mtk_sha_reqctx),
1126         .halg.base      = {
1127                 .cra_name               = "hmac(sha384)",
1128                 .cra_driver_name        = "mtk-hmac-sha384",
1129                 .cra_priority           = 400,
1130                 .cra_flags              = CRYPTO_ALG_ASYNC |
1131                                           CRYPTO_ALG_NEED_FALLBACK,
1132                 .cra_blocksize          = SHA384_BLOCK_SIZE,
1133                 .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
1134                                         sizeof(struct mtk_sha_hmac_ctx),
1135                 .cra_alignmask          = SHA_ALIGN_MSK,
1136                 .cra_module             = THIS_MODULE,
1137                 .cra_init               = mtk_sha_cra_sha384_init,
1138                 .cra_exit               = mtk_sha_cra_exit,
1139         }
1140 },
1141 {
1142         .init           = mtk_sha_init,
1143         .update         = mtk_sha_update,
1144         .final          = mtk_sha_final,
1145         .finup          = mtk_sha_finup,
1146         .digest         = mtk_sha_digest,
1147         .export         = mtk_sha_export,
1148         .import         = mtk_sha_import,
1149         .setkey         = mtk_sha_setkey,
1150         .halg.digestsize        = SHA512_DIGEST_SIZE,
1151         .halg.statesize = sizeof(struct mtk_sha_reqctx),
1152         .halg.base      = {
1153                 .cra_name               = "hmac(sha512)",
1154                 .cra_driver_name        = "mtk-hmac-sha512",
1155                 .cra_priority           = 400,
1156                 .cra_flags              = CRYPTO_ALG_ASYNC |
1157                                           CRYPTO_ALG_NEED_FALLBACK,
1158                 .cra_blocksize          = SHA512_BLOCK_SIZE,
1159                 .cra_ctxsize            = sizeof(struct mtk_sha_ctx) +
1160                                         sizeof(struct mtk_sha_hmac_ctx),
1161                 .cra_alignmask          = SHA_ALIGN_MSK,
1162                 .cra_module             = THIS_MODULE,
1163                 .cra_init               = mtk_sha_cra_sha512_init,
1164                 .cra_exit               = mtk_sha_cra_exit,
1165         }
1166 },
1167 };
1168 
1169 static void mtk_sha_queue_task(unsigned long data)
1170 {
1171         struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
1172 
1173         mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL);
1174 }
1175 
1176 static void mtk_sha_done_task(unsigned long data)
1177 {
1178         struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data;
1179         struct mtk_cryp *cryp = sha->cryp;
1180 
1181         mtk_sha_unmap(cryp, sha);
1182         mtk_sha_complete(cryp, sha);
1183 }
1184 
1185 static irqreturn_t mtk_sha_irq(int irq, void *dev_id)
1186 {
1187         struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id;
1188         struct mtk_cryp *cryp = sha->cryp;
1189         u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id));
1190 
1191         mtk_sha_write(cryp, RDR_STAT(sha->id), val);
1192 
1193         if (likely((SHA_FLAGS_BUSY & sha->flags))) {
1194                 mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST);
1195                 mtk_sha_write(cryp, RDR_THRESH(sha->id),
1196                               MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);
1197 
1198                 tasklet_schedule(&sha->done_task);
1199         } else {
1200                 dev_warn(cryp->dev, "SHA interrupt when no active requests.\n");
1201         }
1202         return IRQ_HANDLED;
1203 }
1204 
1205 /*
1206  * Two SHA records are used to get extra performance. The scheme is
1207  * similar to mtk_aes_record_init().
1208  */
1209 static int mtk_sha_record_init(struct mtk_cryp *cryp)
1210 {
1211         struct mtk_sha_rec **sha = cryp->sha;
1212         int i, err = -ENOMEM;
1213 
1214         for (i = 0; i < MTK_REC_NUM; i++) {
1215                 sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL);
1216                 if (!sha[i])
1217                         goto err_cleanup;
1218 
1219                 sha[i]->cryp = cryp;
1220 
1221                 spin_lock_init(&sha[i]->lock);
1222                 crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE);
1223 
1224                 tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task,
1225                              (unsigned long)sha[i]);
1226                 tasklet_init(&sha[i]->done_task, mtk_sha_done_task,
1227                              (unsigned long)sha[i]);
1228         }
1229 
1230         /* Link to ring2 and ring3 respectively */
1231         sha[0]->id = MTK_RING2;
1232         sha[1]->id = MTK_RING3;
1233 
1234         cryp->rec = 1;
1235 
1236         return 0;
1237 
1238 err_cleanup:
1239         for (; i--; )
1240                 kfree(sha[i]);
1241         return err;
1242 }
1243 
1244 static void mtk_sha_record_free(struct mtk_cryp *cryp)
1245 {
1246         int i;
1247 
1248         for (i = 0; i < MTK_REC_NUM; i++) {
1249                 tasklet_kill(&cryp->sha[i]->done_task);
1250                 tasklet_kill(&cryp->sha[i]->queue_task);
1251 
1252                 kfree(cryp->sha[i]);
1253         }
1254 }
1255 
1256 static void mtk_sha_unregister_algs(void)
1257 {
1258         int i;
1259 
1260         for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++)
1261                 crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);
1262 
1263         for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++)
1264                 crypto_unregister_ahash(&algs_sha384_sha512[i]);
1265 }
1266 
1267 static int mtk_sha_register_algs(void)
1268 {
1269         int err, i;
1270 
1271         for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) {
1272                 err = crypto_register_ahash(&algs_sha1_sha224_sha256[i]);
1273                 if (err)
1274                         goto err_sha_224_256_algs;
1275         }
1276 
1277         for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) {
1278                 err = crypto_register_ahash(&algs_sha384_sha512[i]);
1279                 if (err)
1280                         goto err_sha_384_512_algs;
1281         }
1282 
1283         return 0;
1284 
1285 err_sha_384_512_algs:
1286         for (; i--; )
1287                 crypto_unregister_ahash(&algs_sha384_sha512[i]);
1288         i = ARRAY_SIZE(algs_sha1_sha224_sha256);
1289 err_sha_224_256_algs:
1290         for (; i--; )
1291                 crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]);
1292 
1293         return err;
1294 }
1295 
1296 int mtk_hash_alg_register(struct mtk_cryp *cryp)
1297 {
1298         int err;
1299 
1300         INIT_LIST_HEAD(&cryp->sha_list);
1301 
1302         /* Initialize two hash records */
1303         err = mtk_sha_record_init(cryp);
1304         if (err)
1305                 goto err_record;
1306 
1307         err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq,
1308                                0, "mtk-sha", cryp->sha[0]);
1309         if (err) {
1310                 dev_err(cryp->dev, "unable to request sha irq0.\n");
1311                 goto err_res;
1312         }
1313 
1314         err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq,
1315                                0, "mtk-sha", cryp->sha[1]);
1316         if (err) {
1317                 dev_err(cryp->dev, "unable to request sha irq1.\n");
1318                 goto err_res;
1319         }
1320 
1321         /* Enable ring2 and ring3 interrupts for hash */
1322         mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2);
1323         mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3);
1324 
1325         spin_lock(&mtk_sha.lock);
1326         list_add_tail(&cryp->sha_list, &mtk_sha.dev_list);
1327         spin_unlock(&mtk_sha.lock);
1328 
1329         err = mtk_sha_register_algs();
1330         if (err)
1331                 goto err_algs;
1332 
1333         return 0;
1334 
1335 err_algs:
1336         spin_lock(&mtk_sha.lock);
1337         list_del(&cryp->sha_list);
1338         spin_unlock(&mtk_sha.lock);
1339 err_res:
1340         mtk_sha_record_free(cryp);
1341 err_record:
1342 
1343         dev_err(cryp->dev, "mtk-sha initialization failed.\n");
1344         return err;
1345 }
1346 
1347 void mtk_hash_alg_release(struct mtk_cryp *cryp)
1348 {
1349         spin_lock(&mtk_sha.lock);
1350         list_del(&cryp->sha_list);
1351         spin_unlock(&mtk_sha.lock);
1352 
1353         mtk_sha_unregister_algs();
1354         mtk_sha_record_free(cryp);
1355 }
