root/drivers/crypto/atmel-sha.c

DEFINITIONS

This source file includes the following definitions.
  1. atmel_sha_reg_name
  2. atmel_sha_read
  3. atmel_sha_write
  4. atmel_sha_complete
  5. atmel_sha_append_sg
  6. atmel_sha_fill_padding
  7. atmel_sha_find_dev
  8. atmel_sha_init
  9. atmel_sha_write_ctrl
  10. atmel_sha_wait_for_data_ready
  11. atmel_sha_xmit_cpu
  12. atmel_sha_xmit_pdc
  13. atmel_sha_dma_callback
  14. atmel_sha_xmit_dma
  15. atmel_sha_xmit_start
  16. atmel_sha_update_cpu
  17. atmel_sha_xmit_dma_map
  18. atmel_sha_update_dma_slow
  19. atmel_sha_update_dma_start
  20. atmel_sha_update_dma_stop
  21. atmel_sha_update_req
  22. atmel_sha_final_req
  23. atmel_sha_copy_hash
  24. atmel_sha_copy_ready_hash
  25. atmel_sha_finish
  26. atmel_sha_finish_req
  27. atmel_sha_hw_init
  28. atmel_sha_get_version
  29. atmel_sha_hw_version_init
  30. atmel_sha_handle_queue
  31. atmel_sha_start
  32. atmel_sha_enqueue
  33. atmel_sha_update
  34. atmel_sha_final
  35. atmel_sha_finup
  36. atmel_sha_digest
  37. atmel_sha_export
  38. atmel_sha_import
  39. atmel_sha_cra_init
  40. atmel_sha_queue_task
  41. atmel_sha_done
  42. atmel_sha_done_task
  43. atmel_sha_irq
  44. atmel_sha_dma_check_aligned
  45. atmel_sha_dma_callback2
  46. atmel_sha_dma_start
  47. atmel_sha_cpu_transfer
  48. atmel_sha_cpu_start
  49. atmel_sha_cpu_hash
  50. atmel_sha_hmac_key_init
  51. atmel_sha_hmac_key_release
  52. atmel_sha_hmac_key_set
  53. atmel_sha_hmac_key_get
  54. atmel_sha_hmac_setup
  55. atmel_sha_hmac_prehash_key
  56. atmel_sha_hmac_prehash_key_done
  57. atmel_sha_hmac_compute_ipad_hash
  58. atmel_sha_hmac_compute_opad_hash
  59. atmel_sha_hmac_setup_done
  60. atmel_sha_hmac_start
  61. atmel_sha_hmac_setkey
  62. atmel_sha_hmac_init
  63. atmel_sha_hmac_init_done
  64. atmel_sha_hmac_final
  65. atmel_sha_hmac_final_done
  66. atmel_sha_hmac_digest
  67. atmel_sha_hmac_digest2
  68. atmel_sha_hmac_cra_init
  69. atmel_sha_hmac_cra_exit
  70. atmel_sha_authenc_complete
  71. atmel_sha_authenc_start
  72. atmel_sha_authenc_is_ready
  73. atmel_sha_authenc_get_reqsize
  74. atmel_sha_authenc_spawn
  75. atmel_sha_authenc_free
  76. atmel_sha_authenc_setkey
  77. atmel_sha_authenc_schedule
  78. atmel_sha_authenc_init
  79. atmel_sha_authenc_init2
  80. atmel_sha_authenc_init_done
  81. atmel_sha_authenc_final
  82. atmel_sha_authenc_final_done
  83. atmel_sha_authenc_abort
  84. atmel_sha_unregister_algs
  85. atmel_sha_register_algs
  86. atmel_sha_filter
  87. atmel_sha_dma_init
  88. atmel_sha_dma_cleanup
  89. atmel_sha_get_cap
  90. atmel_sha_of_init
  91. atmel_sha_of_init
  92. atmel_sha_probe
  93. atmel_sha_remove

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * Cryptographic API.
   4  *
   5  * Support for ATMEL SHA1/SHA256 HW acceleration.
   6  *
   7  * Copyright (c) 2012 Eukréa Electromatique - ATMEL
   8  * Author: Nicolas Royer <nicolas@eukrea.com>
   9  *
  10  * Some ideas are from omap-sham.c drivers.
  11  */
  12 
  13 
  14 #include <linux/kernel.h>
  15 #include <linux/module.h>
  16 #include <linux/slab.h>
  17 #include <linux/err.h>
  18 #include <linux/clk.h>
  19 #include <linux/io.h>
  20 #include <linux/hw_random.h>
  21 #include <linux/platform_device.h>
  22 
  23 #include <linux/device.h>
  24 #include <linux/init.h>
  25 #include <linux/errno.h>
  26 #include <linux/interrupt.h>
  27 #include <linux/irq.h>
  28 #include <linux/scatterlist.h>
  29 #include <linux/dma-mapping.h>
  30 #include <linux/of_device.h>
  31 #include <linux/delay.h>
  32 #include <linux/crypto.h>
  33 #include <linux/cryptohash.h>
  34 #include <crypto/scatterwalk.h>
  35 #include <crypto/algapi.h>
  36 #include <crypto/sha.h>
  37 #include <crypto/hash.h>
  38 #include <crypto/internal/hash.h>
  39 #include <linux/platform_data/crypto-atmel.h>
  40 #include "atmel-sha-regs.h"
  41 #include "atmel-authenc.h"
  42 
  43 /* SHA flags */
  44 #define SHA_FLAGS_BUSY                  BIT(0)
  45 #define SHA_FLAGS_FINAL                 BIT(1)
  46 #define SHA_FLAGS_DMA_ACTIVE    BIT(2)
  47 #define SHA_FLAGS_OUTPUT_READY  BIT(3)
  48 #define SHA_FLAGS_INIT                  BIT(4)
  49 #define SHA_FLAGS_CPU                   BIT(5)
  50 #define SHA_FLAGS_DMA_READY             BIT(6)
  51 #define SHA_FLAGS_DUMP_REG      BIT(7)
  52 
  53 /* bits[11:8] are reserved. */
  54 
  55 #define SHA_FLAGS_FINUP         BIT(16)
  56 #define SHA_FLAGS_SG            BIT(17)
  57 #define SHA_FLAGS_ERROR         BIT(23)
  58 #define SHA_FLAGS_PAD           BIT(24)
  59 #define SHA_FLAGS_RESTORE       BIT(25)
  60 #define SHA_FLAGS_IDATAR0       BIT(26)
  61 #define SHA_FLAGS_WAIT_DATARDY  BIT(27)
  62 
  63 #define SHA_OP_INIT     0
  64 #define SHA_OP_UPDATE   1
  65 #define SHA_OP_FINAL    2
  66 #define SHA_OP_DIGEST   3
  67 
  68 #define SHA_BUFFER_LEN          (PAGE_SIZE / 16)
  69 
  70 #define ATMEL_SHA_DMA_THRESHOLD         56
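
      /*
       * SHA_BUFFER_LEN sizes the per-request bounce buffer used by the slow
       * (copy) path. ATMEL_SHA_DMA_THRESHOLD is the cut-over point below
       * which feeding the IDATAR registers from the CPU is cheaper than
       * setting up a DMA/PDC transfer (see atmel_sha_update() and
       * atmel_sha_final_req()).
       */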
  71 
  72 struct atmel_sha_caps {
  73         bool    has_dma;
  74         bool    has_dualbuff;
  75         bool    has_sha224;
  76         bool    has_sha_384_512;
  77         bool    has_uihv;
  78         bool    has_hmac;
  79 };
  80 
  81 struct atmel_sha_dev;
  82 
  83 /*
  84  * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as
  85  * tested by the ahash_prepare_alg() function.
  86  */
  87 struct atmel_sha_reqctx {
  88         struct atmel_sha_dev    *dd;
  89         unsigned long   flags;
  90         unsigned long   op;
  91 
  92         u8      digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
  93         u64     digcnt[2];
  94         size_t  bufcnt;
  95         size_t  buflen;
  96         dma_addr_t      dma_addr;
  97 
  98         /* walk state */
  99         struct scatterlist      *sg;
 100         unsigned int    offset; /* offset in current sg */
 101         unsigned int    total;  /* total request */
 102 
 103         size_t block_size;
 104         size_t hash_size;
 105 
 106         u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
 107 };
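
      /*
       * An equivalent build-time guard for the constraint above could be
       * written as (illustrative sketch, not part of the driver):
       *
       *	BUILD_BUG_ON(sizeof(struct atmel_sha_reqctx) > PAGE_SIZE / 8);
       *
       * placed in a function context such as the probe routine.
       */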
 108 
 109 typedef int (*atmel_sha_fn_t)(struct atmel_sha_dev *);
 110 
 111 struct atmel_sha_ctx {
 112         struct atmel_sha_dev    *dd;
 113         atmel_sha_fn_t          start;
 114 
 115         unsigned long           flags;
 116 };
 117 
 118 #define ATMEL_SHA_QUEUE_LENGTH  50
 119 
 120 struct atmel_sha_dma {
 121         struct dma_chan                 *chan;
 122         struct dma_slave_config dma_conf;
 123         struct scatterlist      *sg;
 124         int                     nents;
 125         unsigned int            last_sg_length;
 126 };
 127 
 128 struct atmel_sha_dev {
 129         struct list_head        list;
 130         unsigned long           phys_base;
 131         struct device           *dev;
 132         struct clk                      *iclk;
 133         int                                     irq;
 134         void __iomem            *io_base;
 135 
 136         spinlock_t              lock;
 137         int                     err;
 138         struct tasklet_struct   done_task;
 139         struct tasklet_struct   queue_task;
 140 
 141         unsigned long           flags;
 142         struct crypto_queue     queue;
 143         struct ahash_request    *req;
 144         bool                    is_async;
 145         bool                    force_complete;
 146         atmel_sha_fn_t          resume;
 147         atmel_sha_fn_t          cpu_transfer_complete;
 148 
 149         struct atmel_sha_dma    dma_lch_in;
 150 
 151         struct atmel_sha_caps   caps;
 152 
 153         struct scatterlist      tmp;
 154 
 155         u32     hw_version;
 156 };
 157 
 158 struct atmel_sha_drv {
 159         struct list_head        dev_list;
 160         spinlock_t              lock;
 161 };
 162 
 163 static struct atmel_sha_drv atmel_sha = {
 164         .dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
 165         .lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
 166 };
 167 
 168 #ifdef VERBOSE_DEBUG
 169 static const char *atmel_sha_reg_name(u32 offset, char *tmp, size_t sz, bool wr)
 170 {
 171         switch (offset) {
 172         case SHA_CR:
 173                 return "CR";
 174 
 175         case SHA_MR:
 176                 return "MR";
 177 
 178         case SHA_IER:
 179                 return "IER";
 180 
 181         case SHA_IDR:
 182                 return "IDR";
 183 
 184         case SHA_IMR:
 185                 return "IMR";
 186 
 187         case SHA_ISR:
 188                 return "ISR";
 189 
 190         case SHA_MSR:
 191                 return "MSR";
 192 
 193         case SHA_BCR:
 194                 return "BCR";
 195 
 196         case SHA_REG_DIN(0):
 197         case SHA_REG_DIN(1):
 198         case SHA_REG_DIN(2):
 199         case SHA_REG_DIN(3):
 200         case SHA_REG_DIN(4):
 201         case SHA_REG_DIN(5):
 202         case SHA_REG_DIN(6):
 203         case SHA_REG_DIN(7):
 204         case SHA_REG_DIN(8):
 205         case SHA_REG_DIN(9):
 206         case SHA_REG_DIN(10):
 207         case SHA_REG_DIN(11):
 208         case SHA_REG_DIN(12):
 209         case SHA_REG_DIN(13):
 210         case SHA_REG_DIN(14):
 211         case SHA_REG_DIN(15):
 212                 snprintf(tmp, sz, "IDATAR[%u]", (offset - SHA_REG_DIN(0)) >> 2);
 213                 break;
 214 
 215         case SHA_REG_DIGEST(0):
 216         case SHA_REG_DIGEST(1):
 217         case SHA_REG_DIGEST(2):
 218         case SHA_REG_DIGEST(3):
 219         case SHA_REG_DIGEST(4):
 220         case SHA_REG_DIGEST(5):
 221         case SHA_REG_DIGEST(6):
 222         case SHA_REG_DIGEST(7):
 223         case SHA_REG_DIGEST(8):
 224         case SHA_REG_DIGEST(9):
 225         case SHA_REG_DIGEST(10):
 226         case SHA_REG_DIGEST(11):
 227         case SHA_REG_DIGEST(12):
 228         case SHA_REG_DIGEST(13):
 229         case SHA_REG_DIGEST(14):
 230         case SHA_REG_DIGEST(15):
 231                 if (wr)
 232                         snprintf(tmp, sz, "IDATAR[%u]",
 233                                  16u + ((offset - SHA_REG_DIGEST(0)) >> 2));
 234                 else
 235                         snprintf(tmp, sz, "ODATAR[%u]",
 236                                  (offset - SHA_REG_DIGEST(0)) >> 2);
 237                 break;
 238 
 239         case SHA_HW_VERSION:
 240                 return "HWVER";
 241 
 242         default:
 243                 snprintf(tmp, sz, "0x%02x", offset);
 244                 break;
 245         }
 246 
 247         return tmp;
 248 }
 249 
 250 #endif /* VERBOSE_DEBUG */
 251 
 252 static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
 253 {
 254         u32 value = readl_relaxed(dd->io_base + offset);
 255 
 256 #ifdef VERBOSE_DEBUG
 257         if (dd->flags & SHA_FLAGS_DUMP_REG) {
 258                 char tmp[16];
 259 
 260                 dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
 261                          atmel_sha_reg_name(offset, tmp, sizeof(tmp), false));
 262         }
 263 #endif /* VERBOSE_DEBUG */
 264 
 265         return value;
 266 }
 267 
 268 static inline void atmel_sha_write(struct atmel_sha_dev *dd,
 269                                         u32 offset, u32 value)
 270 {
 271 #ifdef VERBOSE_DEBUG
 272         if (dd->flags & SHA_FLAGS_DUMP_REG) {
 273                 char tmp[16];
 274 
 275                 dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
 276                          atmel_sha_reg_name(offset, tmp, sizeof(tmp), true));
 277         }
 278 #endif /* VERBOSE_DEBUG */
 279 
 280         writel_relaxed(value, dd->io_base + offset);
 281 }
 282 
 283 static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err)
 284 {
 285         struct ahash_request *req = dd->req;
 286 
 287         dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
 288                        SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY |
 289                        SHA_FLAGS_DUMP_REG);
 290 
 291         clk_disable(dd->iclk);
 292 
 293         if ((dd->is_async || dd->force_complete) && req->base.complete)
 294                 req->base.complete(&req->base, err);
 295 
 296         /* handle new request */
 297         tasklet_schedule(&dd->queue_task);
 298 
 299         return err;
 300 }
 301 
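      /*
       * Copy as much request data as will fit from the scatterlist walk
       * (ctx->sg, ctx->offset, ctx->total) into the bounce buffer, stopping
       * when the buffer is full or the request data is exhausted.
       */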
 302 static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
 303 {
 304         size_t count;
 305 
 306         while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
 307                 count = min(ctx->sg->length - ctx->offset, ctx->total);
 308                 count = min(count, ctx->buflen - ctx->bufcnt);
 309 
 310                 if (count <= 0) {
  311                         /*
  312                          * Check if count <= 0 because the buffer is full or
  313                          * because the sg length is 0. In the latter case,
  314                          * check if there is another sg in the list; a zero-length
  315                          * sg doesn't necessarily mean the end of the sg list.
  316                          */
 317                         if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
 318                                 ctx->sg = sg_next(ctx->sg);
 319                                 continue;
 320                         } else {
 321                                 break;
 322                         }
 323                 }
 324 
 325                 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
 326                         ctx->offset, count, 0);
 327 
 328                 ctx->bufcnt += count;
 329                 ctx->offset += count;
 330                 ctx->total -= count;
 331 
 332                 if (ctx->offset == ctx->sg->length) {
 333                         ctx->sg = sg_next(ctx->sg);
 334                         if (ctx->sg)
 335                                 ctx->offset = 0;
 336                         else
 337                                 ctx->total = 0;
 338                 }
 339         }
 340 
 341         return 0;
 342 }
 343 
  344 /*
  345  * The purpose of this padding is to ensure that the padded message is a
  346  * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
  347  * The bit "1" is appended at the end of the message, followed by
  348  * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
  349  * 128-bit block (SHA384/SHA512) equal to the message length in bits
  350  * is appended.
  351  *
  352  * For SHA1/SHA224/SHA256, padlen is calculated as follows:
  353  *  - if message length < 56 bytes then padlen = 56 - message length
  354  *  - else padlen = 64 + 56 - message length
  355  *
  356  * For SHA384/SHA512, padlen is calculated as follows:
  357  *  - if message length < 112 bytes then padlen = 112 - message length
  358  *  - else padlen = 128 + 112 - message length
  359  */
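
      /*
       * Worked example (SHA256, 64-byte blocks): "message length" above is
       * really the byte offset within the current block, i.e. index below.
       * For index = 20, padlen = 56 - 20 = 36, so 20 + 36 + 8 = 64 bytes are
       * hashed; for index = 60, padlen = (64 + 56) - 60 = 60 and the padded
       * tail spans two blocks: 60 + 60 + 8 = 128 bytes.
       */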
 360 static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
 361 {
 362         unsigned int index, padlen;
 363         u64 bits[2];
 364         u64 size[2];
 365 
 366         size[0] = ctx->digcnt[0];
 367         size[1] = ctx->digcnt[1];
 368 
 369         size[0] += ctx->bufcnt;
 370         if (size[0] < ctx->bufcnt)
 371                 size[1]++;
 372 
 373         size[0] += length;
  374         if (size[0] < length)
 375                 size[1]++;
 376 
 377         bits[1] = cpu_to_be64(size[0] << 3);
 378         bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);
 379 
 380         switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
 381         case SHA_FLAGS_SHA384:
 382         case SHA_FLAGS_SHA512:
 383                 index = ctx->bufcnt & 0x7f;
 384                 padlen = (index < 112) ? (112 - index) : ((128+112) - index);
 385                 *(ctx->buffer + ctx->bufcnt) = 0x80;
 386                 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
 387                 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
 388                 ctx->bufcnt += padlen + 16;
 389                 ctx->flags |= SHA_FLAGS_PAD;
 390                 break;
 391 
 392         default:
 393                 index = ctx->bufcnt & 0x3f;
 394                 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
 395                 *(ctx->buffer + ctx->bufcnt) = 0x80;
 396                 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
 397                 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
 398                 ctx->bufcnt += padlen + 8;
 399                 ctx->flags |= SHA_FLAGS_PAD;
 400                 break;
 401         }
 402 }
 403 
 404 static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx)
 405 {
 406         struct atmel_sha_dev *dd = NULL;
 407         struct atmel_sha_dev *tmp;
 408 
 409         spin_lock_bh(&atmel_sha.lock);
 410         if (!tctx->dd) {
 411                 list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
 412                         dd = tmp;
 413                         break;
 414                 }
 415                 tctx->dd = dd;
 416         } else {
 417                 dd = tctx->dd;
 418         }
 419 
 420         spin_unlock_bh(&atmel_sha.lock);
 421 
 422         return dd;
 423 }
 424 
 425 static int atmel_sha_init(struct ahash_request *req)
 426 {
 427         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 428         struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
 429         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
 430         struct atmel_sha_dev *dd = atmel_sha_find_dev(tctx);
 431 
 432         ctx->dd = dd;
 433 
 434         ctx->flags = 0;
 435 
 436         dev_dbg(dd->dev, "init: digest size: %d\n",
 437                 crypto_ahash_digestsize(tfm));
 438 
 439         switch (crypto_ahash_digestsize(tfm)) {
 440         case SHA1_DIGEST_SIZE:
 441                 ctx->flags |= SHA_FLAGS_SHA1;
 442                 ctx->block_size = SHA1_BLOCK_SIZE;
 443                 break;
 444         case SHA224_DIGEST_SIZE:
 445                 ctx->flags |= SHA_FLAGS_SHA224;
 446                 ctx->block_size = SHA224_BLOCK_SIZE;
 447                 break;
 448         case SHA256_DIGEST_SIZE:
 449                 ctx->flags |= SHA_FLAGS_SHA256;
 450                 ctx->block_size = SHA256_BLOCK_SIZE;
 451                 break;
 452         case SHA384_DIGEST_SIZE:
 453                 ctx->flags |= SHA_FLAGS_SHA384;
 454                 ctx->block_size = SHA384_BLOCK_SIZE;
 455                 break;
 456         case SHA512_DIGEST_SIZE:
 457                 ctx->flags |= SHA_FLAGS_SHA512;
 458                 ctx->block_size = SHA512_BLOCK_SIZE;
 459                 break;
 460         default:
 461                 return -EINVAL;
 462                 break;
 463         }
 464 
 465         ctx->bufcnt = 0;
 466         ctx->digcnt[0] = 0;
 467         ctx->digcnt[1] = 0;
 468         ctx->buflen = SHA_BUFFER_LEN;
 469 
 470         return 0;
 471 }
 472 
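      /*
       * Program the mode register for the current request: transfer mode
       * (CPU vs PDC/DMA), hash algorithm and, on hardware with the UIHV
       * feature, restore the partial digest saved by atmel_sha_copy_hash()
       * so that several requests can be processed back to back.
       */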
 473 static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
 474 {
 475         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 476         u32 valmr = SHA_MR_MODE_AUTO;
 477         unsigned int i, hashsize = 0;
 478 
 479         if (likely(dma)) {
 480                 if (!dd->caps.has_dma)
 481                         atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
 482                 valmr = SHA_MR_MODE_PDC;
 483                 if (dd->caps.has_dualbuff)
 484                         valmr |= SHA_MR_DUALBUFF;
 485         } else {
 486                 atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
 487         }
 488 
 489         switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
 490         case SHA_FLAGS_SHA1:
 491                 valmr |= SHA_MR_ALGO_SHA1;
 492                 hashsize = SHA1_DIGEST_SIZE;
 493                 break;
 494 
 495         case SHA_FLAGS_SHA224:
 496                 valmr |= SHA_MR_ALGO_SHA224;
 497                 hashsize = SHA256_DIGEST_SIZE;
 498                 break;
 499 
 500         case SHA_FLAGS_SHA256:
 501                 valmr |= SHA_MR_ALGO_SHA256;
 502                 hashsize = SHA256_DIGEST_SIZE;
 503                 break;
 504 
 505         case SHA_FLAGS_SHA384:
 506                 valmr |= SHA_MR_ALGO_SHA384;
 507                 hashsize = SHA512_DIGEST_SIZE;
 508                 break;
 509 
 510         case SHA_FLAGS_SHA512:
 511                 valmr |= SHA_MR_ALGO_SHA512;
 512                 hashsize = SHA512_DIGEST_SIZE;
 513                 break;
 514 
 515         default:
 516                 break;
 517         }
 518 
 519         /* Setting CR_FIRST only for the first iteration */
 520         if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
 521                 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
 522         } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
 523                 const u32 *hash = (const u32 *)ctx->digest;
 524 
 525                 /*
 526                  * Restore the hardware context: update the User Initialize
 527                  * Hash Value (UIHV) with the value saved when the latest
 528                  * 'update' operation completed on this very same crypto
 529                  * request.
 530                  */
 531                 ctx->flags &= ~SHA_FLAGS_RESTORE;
 532                 atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
 533                 for (i = 0; i < hashsize / sizeof(u32); ++i)
 534                         atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
 535                 atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
 536                 valmr |= SHA_MR_UIHV;
 537         }
 538         /*
 539          * WARNING: If the UIHV feature is not available, the hardware CANNOT
 540          * process concurrent requests: the internal registers used to store
 541          * the hash/digest are still set to the partial digest output values
 542          * computed during the latest round.
 543          */
 544 
 545         atmel_sha_write(dd, SHA_MR, valmr);
 546 }
 547 
 548 static inline int atmel_sha_wait_for_data_ready(struct atmel_sha_dev *dd,
 549                                                 atmel_sha_fn_t resume)
 550 {
 551         u32 isr = atmel_sha_read(dd, SHA_ISR);
 552 
 553         if (unlikely(isr & SHA_INT_DATARDY))
 554                 return resume(dd);
 555 
 556         dd->resume = resume;
 557         atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
 558         return -EINPROGRESS;
 559 }
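
      /*
       * Typical usage (sketch): a transfer step ends with
       *
       *	return atmel_sha_wait_for_data_ready(dd, atmel_sha_next_step);
       *
       * so that atmel_sha_next_step() runs either right away, when DATRDY
       * is already set, or later from the done_task once the DATRDY
       * interrupt fires (atmel_sha_next_step is a made-up name used here
       * only for illustration).
       */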
 560 
 561 static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
 562                               size_t length, int final)
 563 {
 564         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 565         int count, len32;
 566         const u32 *buffer = (const u32 *)buf;
 567 
 568         dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
 569                 ctx->digcnt[1], ctx->digcnt[0], length, final);
 570 
 571         atmel_sha_write_ctrl(dd, 0);
 572 
 573         /* should be non-zero before next lines to disable clocks later */
 574         ctx->digcnt[0] += length;
 575         if (ctx->digcnt[0] < length)
 576                 ctx->digcnt[1]++;
 577 
 578         if (final)
 579                 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
 580 
 581         len32 = DIV_ROUND_UP(length, sizeof(u32));
 582 
 583         dd->flags |= SHA_FLAGS_CPU;
 584 
 585         for (count = 0; count < len32; count++)
 586                 atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);
 587 
 588         return -EINPROGRESS;
 589 }
 590 
 591 static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
 592                 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
 593 {
 594         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 595         int len32;
 596 
 597         dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
 598                 ctx->digcnt[1], ctx->digcnt[0], length1, final);
 599 
 600         len32 = DIV_ROUND_UP(length1, sizeof(u32));
 601         atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
 602         atmel_sha_write(dd, SHA_TPR, dma_addr1);
 603         atmel_sha_write(dd, SHA_TCR, len32);
 604 
 605         len32 = DIV_ROUND_UP(length2, sizeof(u32));
 606         atmel_sha_write(dd, SHA_TNPR, dma_addr2);
 607         atmel_sha_write(dd, SHA_TNCR, len32);
 608 
 609         atmel_sha_write_ctrl(dd, 1);
 610 
 611         /* should be non-zero before next lines to disable clocks later */
 612         ctx->digcnt[0] += length1;
 613         if (ctx->digcnt[0] < length1)
 614                 ctx->digcnt[1]++;
 615 
 616         if (final)
 617                 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
 618 
  619         dd->flags |= SHA_FLAGS_DMA_ACTIVE;
 620 
 621         /* Start DMA transfer */
 622         atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);
 623 
 624         return -EINPROGRESS;
 625 }
 626 
 627 static void atmel_sha_dma_callback(void *data)
 628 {
 629         struct atmel_sha_dev *dd = data;
 630 
 631         dd->is_async = true;
 632 
 633         /* dma_lch_in - completed - wait DATRDY */
 634         atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
 635 }
 636 
 637 static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
 638                 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
 639 {
 640         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 641         struct dma_async_tx_descriptor  *in_desc;
 642         struct scatterlist sg[2];
 643 
 644         dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
 645                 ctx->digcnt[1], ctx->digcnt[0], length1, final);
 646 
 647         dd->dma_lch_in.dma_conf.src_maxburst = 16;
 648         dd->dma_lch_in.dma_conf.dst_maxburst = 16;
 649 
 650         dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
 651 
 652         if (length2) {
 653                 sg_init_table(sg, 2);
 654                 sg_dma_address(&sg[0]) = dma_addr1;
 655                 sg_dma_len(&sg[0]) = length1;
 656                 sg_dma_address(&sg[1]) = dma_addr2;
 657                 sg_dma_len(&sg[1]) = length2;
 658                 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
 659                         DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 660         } else {
 661                 sg_init_table(sg, 1);
 662                 sg_dma_address(&sg[0]) = dma_addr1;
 663                 sg_dma_len(&sg[0]) = length1;
 664                 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
 665                         DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 666         }
 667         if (!in_desc)
 668                 return atmel_sha_complete(dd, -EINVAL);
 669 
 670         in_desc->callback = atmel_sha_dma_callback;
 671         in_desc->callback_param = dd;
 672 
 673         atmel_sha_write_ctrl(dd, 1);
 674 
 675         /* should be non-zero before next lines to disable clocks later */
 676         ctx->digcnt[0] += length1;
 677         if (ctx->digcnt[0] < length1)
 678                 ctx->digcnt[1]++;
 679 
 680         if (final)
 681                 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */
 682 
  683         dd->flags |= SHA_FLAGS_DMA_ACTIVE;
 684 
 685         /* Start DMA transfer */
 686         dmaengine_submit(in_desc);
 687         dma_async_issue_pending(dd->dma_lch_in.chan);
 688 
 689         return -EINPROGRESS;
 690 }
 691 
 692 static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
 693                 size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
 694 {
 695         if (dd->caps.has_dma)
 696                 return atmel_sha_xmit_dma(dd, dma_addr1, length1,
 697                                 dma_addr2, length2, final);
 698         else
 699                 return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
 700                                 dma_addr2, length2, final);
 701 }
 702 
 703 static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
 704 {
 705         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 706         int bufcnt;
 707 
 708         atmel_sha_append_sg(ctx);
 709         atmel_sha_fill_padding(ctx, 0);
 710         bufcnt = ctx->bufcnt;
 711         ctx->bufcnt = 0;
 712 
 713         return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
 714 }
 715 
 716 static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
 717                                         struct atmel_sha_reqctx *ctx,
 718                                         size_t length, int final)
 719 {
 720         ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
 721                                 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
 722         if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
 723                 dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen +
 724                                 ctx->block_size);
 725                 return atmel_sha_complete(dd, -EINVAL);
 726         }
 727 
 728         ctx->flags &= ~SHA_FLAGS_SG;
 729 
 730         /* next call does not fail... so no unmap in the case of error */
 731         return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
 732 }
 733 
 734 static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
 735 {
 736         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 737         unsigned int final;
 738         size_t count;
 739 
 740         atmel_sha_append_sg(ctx);
 741 
 742         final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
 743 
 744         dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n",
 745                  ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);
 746 
 747         if (final)
 748                 atmel_sha_fill_padding(ctx, 0);
 749 
 750         if (final || (ctx->bufcnt == ctx->buflen)) {
 751                 count = ctx->bufcnt;
 752                 ctx->bufcnt = 0;
 753                 return atmel_sha_xmit_dma_map(dd, ctx, count, final);
 754         }
 755 
 756         return 0;
 757 }
 758 
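      /*
       * Fast-path update: stream suitably aligned scatterlist entries
       * straight to the engine by DMA, falling back to
       * atmel_sha_update_dma_slow() (bounce buffer) when alignment or
       * buffered leftovers prevent it. On the final chunk, the unaligned
       * tail plus padding is sent from the bounce buffer as a second
       * DMA buffer.
       */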
 759 static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
 760 {
 761         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 762         unsigned int length, final, tail;
 763         struct scatterlist *sg;
 764         unsigned int count;
 765 
 766         if (!ctx->total)
 767                 return 0;
 768 
 769         if (ctx->bufcnt || ctx->offset)
 770                 return atmel_sha_update_dma_slow(dd);
 771 
 772         dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zd, total: %u\n",
 773                 ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);
 774 
 775         sg = ctx->sg;
 776 
 777         if (!IS_ALIGNED(sg->offset, sizeof(u32)))
 778                 return atmel_sha_update_dma_slow(dd);
 779 
 780         if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
 781                 /* size is not ctx->block_size aligned */
 782                 return atmel_sha_update_dma_slow(dd);
 783 
 784         length = min(ctx->total, sg->length);
 785 
 786         if (sg_is_last(sg)) {
 787                 if (!(ctx->flags & SHA_FLAGS_FINUP)) {
 788                         /* not last sg must be ctx->block_size aligned */
 789                         tail = length & (ctx->block_size - 1);
 790                         length -= tail;
 791                 }
 792         }
 793 
 794         ctx->total -= length;
 795         ctx->offset = length; /* offset where to start slow */
 796 
 797         final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
 798 
 799         /* Add padding */
 800         if (final) {
 801                 tail = length & (ctx->block_size - 1);
 802                 length -= tail;
 803                 ctx->total += tail;
 804                 ctx->offset = length; /* offset where to start slow */
 805 
 806                 sg = ctx->sg;
 807                 atmel_sha_append_sg(ctx);
 808 
 809                 atmel_sha_fill_padding(ctx, length);
 810 
 811                 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
 812                         ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
 813                 if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
 814                         dev_err(dd->dev, "dma %zu bytes error\n",
 815                                 ctx->buflen + ctx->block_size);
 816                         return atmel_sha_complete(dd, -EINVAL);
 817                 }
 818 
 819                 if (length == 0) {
 820                         ctx->flags &= ~SHA_FLAGS_SG;
 821                         count = ctx->bufcnt;
 822                         ctx->bufcnt = 0;
 823                         return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
 824                                         0, final);
 825                 } else {
 826                         ctx->sg = sg;
 827                         if (!dma_map_sg(dd->dev, ctx->sg, 1,
 828                                 DMA_TO_DEVICE)) {
  829                                         dev_err(dd->dev, "dma_map_sg error\n");
 830                                         return atmel_sha_complete(dd, -EINVAL);
 831                         }
 832 
 833                         ctx->flags |= SHA_FLAGS_SG;
 834 
 835                         count = ctx->bufcnt;
 836                         ctx->bufcnt = 0;
 837                         return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
 838                                         length, ctx->dma_addr, count, final);
 839                 }
 840         }
 841 
 842         if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
  843                 dev_err(dd->dev, "dma_map_sg error\n");
 844                 return atmel_sha_complete(dd, -EINVAL);
 845         }
 846 
 847         ctx->flags |= SHA_FLAGS_SG;
 848 
 849         /* next call does not fail... so no unmap in the case of error */
 850         return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
 851                                                                 0, final);
 852 }
 853 
 854 static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
 855 {
 856         struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
 857 
 858         if (ctx->flags & SHA_FLAGS_SG) {
 859                 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
 860                 if (ctx->sg->length == ctx->offset) {
 861                         ctx->sg = sg_next(ctx->sg);
 862                         if (ctx->sg)
 863                                 ctx->offset = 0;
 864                 }
 865                 if (ctx->flags & SHA_FLAGS_PAD) {
 866                         dma_unmap_single(dd->dev, ctx->dma_addr,
 867                                 ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
 868                 }
 869         } else {
 870                 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
 871                                                 ctx->block_size, DMA_TO_DEVICE);
 872         }
 873 
 874         return 0;
 875 }
 876 
 877 static int atmel_sha_update_req(struct atmel_sha_dev *dd)
 878 {
 879         struct ahash_request *req = dd->req;
 880         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
 881         int err;
 882 
 883         dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
 884                 ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
 885 
 886         if (ctx->flags & SHA_FLAGS_CPU)
 887                 err = atmel_sha_update_cpu(dd);
 888         else
 889                 err = atmel_sha_update_dma_start(dd);
 890 
  891         /* wait for DMA completion before we can take more data */
  892         dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
 893                         err, ctx->digcnt[1], ctx->digcnt[0]);
 894 
 895         return err;
 896 }
 897 
 898 static int atmel_sha_final_req(struct atmel_sha_dev *dd)
 899 {
 900         struct ahash_request *req = dd->req;
 901         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
 902         int err = 0;
 903         int count;
 904 
 905         if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
 906                 atmel_sha_fill_padding(ctx, 0);
 907                 count = ctx->bufcnt;
 908                 ctx->bufcnt = 0;
 909                 err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
 910         }
 911         /* faster to handle last block with cpu */
 912         else {
 913                 atmel_sha_fill_padding(ctx, 0);
 914                 count = ctx->bufcnt;
 915                 ctx->bufcnt = 0;
 916                 err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
 917         }
 918 
 919         dev_dbg(dd->dev, "final_req: err: %d\n", err);
 920 
 921         return err;
 922 }
 923 
 924 static void atmel_sha_copy_hash(struct ahash_request *req)
 925 {
 926         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
 927         u32 *hash = (u32 *)ctx->digest;
 928         unsigned int i, hashsize;
 929 
 930         switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
 931         case SHA_FLAGS_SHA1:
 932                 hashsize = SHA1_DIGEST_SIZE;
 933                 break;
 934 
 935         case SHA_FLAGS_SHA224:
 936         case SHA_FLAGS_SHA256:
 937                 hashsize = SHA256_DIGEST_SIZE;
 938                 break;
 939 
 940         case SHA_FLAGS_SHA384:
 941         case SHA_FLAGS_SHA512:
 942                 hashsize = SHA512_DIGEST_SIZE;
 943                 break;
 944 
 945         default:
 946                 /* Should not happen... */
 947                 return;
 948         }
 949 
 950         for (i = 0; i < hashsize / sizeof(u32); ++i)
 951                 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
 952         ctx->flags |= SHA_FLAGS_RESTORE;
 953 }
 954 
 955 static void atmel_sha_copy_ready_hash(struct ahash_request *req)
 956 {
 957         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
 958 
 959         if (!req->result)
 960                 return;
 961 
 962         switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
 963         default:
 964         case SHA_FLAGS_SHA1:
 965                 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
 966                 break;
 967 
 968         case SHA_FLAGS_SHA224:
 969                 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
 970                 break;
 971 
 972         case SHA_FLAGS_SHA256:
 973                 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
 974                 break;
 975 
 976         case SHA_FLAGS_SHA384:
 977                 memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
 978                 break;
 979 
 980         case SHA_FLAGS_SHA512:
 981                 memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
 982                 break;
 983         }
 984 }
 985 
 986 static int atmel_sha_finish(struct ahash_request *req)
 987 {
 988         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
 989         struct atmel_sha_dev *dd = ctx->dd;
 990 
 991         if (ctx->digcnt[0] || ctx->digcnt[1])
 992                 atmel_sha_copy_ready_hash(req);
 993 
 994         dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1],
 995                 ctx->digcnt[0], ctx->bufcnt);
 996 
 997         return 0;
 998 }
 999 
1000 static void atmel_sha_finish_req(struct ahash_request *req, int err)
1001 {
1002         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1003         struct atmel_sha_dev *dd = ctx->dd;
1004 
1005         if (!err) {
1006                 atmel_sha_copy_hash(req);
1007                 if (SHA_FLAGS_FINAL & dd->flags)
1008                         err = atmel_sha_finish(req);
1009         } else {
1010                 ctx->flags |= SHA_FLAGS_ERROR;
1011         }
1012 
1013         /* atomic operation is not needed here */
1014         (void)atmel_sha_complete(dd, err);
1015 }
1016 
1017 static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
1018 {
1019         int err;
1020 
1021         err = clk_enable(dd->iclk);
1022         if (err)
1023                 return err;
1024 
1025         if (!(SHA_FLAGS_INIT & dd->flags)) {
1026                 atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
1027                 dd->flags |= SHA_FLAGS_INIT;
1028                 dd->err = 0;
1029         }
1030 
1031         return 0;
1032 }
1033 
1034 static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
1035 {
1036         return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
1037 }
1038 
1039 static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
1040 {
1041         atmel_sha_hw_init(dd);
1042 
1043         dd->hw_version = atmel_sha_get_version(dd);
1044 
1045         dev_info(dd->dev,
1046                         "version: 0x%x\n", dd->hw_version);
1047 
1048         clk_disable(dd->iclk);
1049 }
1050 
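      /*
       * Enqueue @req (if any) and, unless the engine is already busy,
       * dequeue the next request and run its ctx->start() handler. Returns
       * the enqueue status for the caller's request, or the handler's
       * result when that very request was started synchronously.
       */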
1051 static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
1052                                   struct ahash_request *req)
1053 {
1054         struct crypto_async_request *async_req, *backlog;
1055         struct atmel_sha_ctx *ctx;
1056         unsigned long flags;
1057         bool start_async;
1058         int err = 0, ret = 0;
1059 
1060         spin_lock_irqsave(&dd->lock, flags);
1061         if (req)
1062                 ret = ahash_enqueue_request(&dd->queue, req);
1063 
1064         if (SHA_FLAGS_BUSY & dd->flags) {
1065                 spin_unlock_irqrestore(&dd->lock, flags);
1066                 return ret;
1067         }
1068 
1069         backlog = crypto_get_backlog(&dd->queue);
1070         async_req = crypto_dequeue_request(&dd->queue);
1071         if (async_req)
1072                 dd->flags |= SHA_FLAGS_BUSY;
1073 
1074         spin_unlock_irqrestore(&dd->lock, flags);
1075 
1076         if (!async_req)
1077                 return ret;
1078 
1079         if (backlog)
1080                 backlog->complete(backlog, -EINPROGRESS);
1081 
1082         ctx = crypto_tfm_ctx(async_req->tfm);
1083 
1084         dd->req = ahash_request_cast(async_req);
1085         start_async = (dd->req != req);
1086         dd->is_async = start_async;
1087         dd->force_complete = false;
1088 
1089         /* WARNING: ctx->start() MAY change dd->is_async. */
1090         err = ctx->start(dd);
1091         return (start_async) ? ret : err;
1092 }
1093 
1094 static int atmel_sha_done(struct atmel_sha_dev *dd);
1095 
1096 static int atmel_sha_start(struct atmel_sha_dev *dd)
1097 {
1098         struct ahash_request *req = dd->req;
1099         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1100         int err;
1101 
1102         dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
1103                                                 ctx->op, req->nbytes);
1104 
1105         err = atmel_sha_hw_init(dd);
1106         if (err)
1107                 return atmel_sha_complete(dd, err);
1108 
1109         /*
1110          * atmel_sha_update_req() and atmel_sha_final_req() can return either:
1111          *  -EINPROGRESS: the hardware is busy and the SHA driver will resume
1112          *                its job later in the done_task.
1113          *                This is the main path.
1114          *
1115          * 0: the SHA driver can continue its job then release the hardware
1116          *    later, if needed, with atmel_sha_finish_req().
1117          *    This is the alternate path.
1118          *
1119          * < 0: an error has occurred so atmel_sha_complete(dd, err) has already
1120          *      been called, hence the hardware has been released.
1121          *      The SHA driver must stop its job without calling
1122          *      atmel_sha_finish_req(), otherwise atmel_sha_complete() would be
1123          *      called a second time.
1124          *
1125          * Please note that currently, atmel_sha_final_req() never returns 0.
1126          */
1127 
1128         dd->resume = atmel_sha_done;
1129         if (ctx->op == SHA_OP_UPDATE) {
1130                 err = atmel_sha_update_req(dd);
1131                 if (!err && (ctx->flags & SHA_FLAGS_FINUP))
1132                         /* no final() after finup() */
1133                         err = atmel_sha_final_req(dd);
1134         } else if (ctx->op == SHA_OP_FINAL) {
1135                 err = atmel_sha_final_req(dd);
1136         }
1137 
1138         if (!err)
1139                 /* done_task will not finish it, so do it here */
1140                 atmel_sha_finish_req(req, err);
1141 
1142         dev_dbg(dd->dev, "exit, err: %d\n", err);
1143 
1144         return err;
1145 }
1146 
1147 static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
1148 {
1149         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1150         struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1151         struct atmel_sha_dev *dd = tctx->dd;
1152 
1153         ctx->op = op;
1154 
1155         return atmel_sha_handle_queue(dd, req);
1156 }
1157 
1158 static int atmel_sha_update(struct ahash_request *req)
1159 {
1160         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1161 
1162         if (!req->nbytes)
1163                 return 0;
1164 
1165         ctx->total = req->nbytes;
1166         ctx->sg = req->src;
1167         ctx->offset = 0;
1168 
1169         if (ctx->flags & SHA_FLAGS_FINUP) {
1170                 if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
1171                         /* faster to use CPU for short transfers */
1172                         ctx->flags |= SHA_FLAGS_CPU;
1173         } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
1174                 atmel_sha_append_sg(ctx);
1175                 return 0;
1176         }
1177         return atmel_sha_enqueue(req, SHA_OP_UPDATE);
1178 }
1179 
1180 static int atmel_sha_final(struct ahash_request *req)
1181 {
1182         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1183 
1184         ctx->flags |= SHA_FLAGS_FINUP;
1185 
1186         if (ctx->flags & SHA_FLAGS_ERROR)
1187                 return 0; /* uncompleted hash is not needed */
1188 
1189         if (ctx->flags & SHA_FLAGS_PAD)
1190                 /* copy ready hash (+ finalize hmac) */
1191                 return atmel_sha_finish(req);
1192 
1193         return atmel_sha_enqueue(req, SHA_OP_FINAL);
1194 }
1195 
1196 static int atmel_sha_finup(struct ahash_request *req)
1197 {
1198         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1199         int err1, err2;
1200 
1201         ctx->flags |= SHA_FLAGS_FINUP;
1202 
1203         err1 = atmel_sha_update(req);
1204         if (err1 == -EINPROGRESS ||
1205             (err1 == -EBUSY && (ahash_request_flags(req) &
1206                                 CRYPTO_TFM_REQ_MAY_BACKLOG)))
1207                 return err1;
1208 
 1209         /*
 1210          * final() always has to be called to clean up resources,
 1211          * even if update() failed, except in the -EINPROGRESS case.
 1212          */
1213         err2 = atmel_sha_final(req);
1214 
1215         return err1 ?: err2;
1216 }
1217 
1218 static int atmel_sha_digest(struct ahash_request *req)
1219 {
1220         return atmel_sha_init(req) ?: atmel_sha_finup(req);
1221 }
1222 
1223 
1224 static int atmel_sha_export(struct ahash_request *req, void *out)
1225 {
1226         const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1227 
1228         memcpy(out, ctx, sizeof(*ctx));
1229         return 0;
1230 }
1231 
1232 static int atmel_sha_import(struct ahash_request *req, const void *in)
1233 {
1234         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1235 
1236         memcpy(ctx, in, sizeof(*ctx));
1237         return 0;
1238 }
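
      /*
       * The exported state is simply the whole request context, which is
       * why every .statesize below is sizeof(struct atmel_sha_reqctx).
       */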
1239 
1240 static int atmel_sha_cra_init(struct crypto_tfm *tfm)
1241 {
1242         struct atmel_sha_ctx *ctx = crypto_tfm_ctx(tfm);
1243 
1244         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1245                                  sizeof(struct atmel_sha_reqctx));
1246         ctx->start = atmel_sha_start;
1247 
1248         return 0;
1249 }
1250 
1251 static struct ahash_alg sha_1_256_algs[] = {
1252 {
1253         .init           = atmel_sha_init,
1254         .update         = atmel_sha_update,
1255         .final          = atmel_sha_final,
1256         .finup          = atmel_sha_finup,
1257         .digest         = atmel_sha_digest,
1258         .export         = atmel_sha_export,
1259         .import         = atmel_sha_import,
1260         .halg = {
1261                 .digestsize     = SHA1_DIGEST_SIZE,
1262                 .statesize      = sizeof(struct atmel_sha_reqctx),
1263                 .base   = {
1264                         .cra_name               = "sha1",
1265                         .cra_driver_name        = "atmel-sha1",
1266                         .cra_priority           = 100,
1267                         .cra_flags              = CRYPTO_ALG_ASYNC,
1268                         .cra_blocksize          = SHA1_BLOCK_SIZE,
1269                         .cra_ctxsize            = sizeof(struct atmel_sha_ctx),
1270                         .cra_alignmask          = 0,
1271                         .cra_module             = THIS_MODULE,
1272                         .cra_init               = atmel_sha_cra_init,
1273                 }
1274         }
1275 },
1276 {
1277         .init           = atmel_sha_init,
1278         .update         = atmel_sha_update,
1279         .final          = atmel_sha_final,
1280         .finup          = atmel_sha_finup,
1281         .digest         = atmel_sha_digest,
1282         .export         = atmel_sha_export,
1283         .import         = atmel_sha_import,
1284         .halg = {
1285                 .digestsize     = SHA256_DIGEST_SIZE,
1286                 .statesize      = sizeof(struct atmel_sha_reqctx),
1287                 .base   = {
1288                         .cra_name               = "sha256",
1289                         .cra_driver_name        = "atmel-sha256",
1290                         .cra_priority           = 100,
1291                         .cra_flags              = CRYPTO_ALG_ASYNC,
1292                         .cra_blocksize          = SHA256_BLOCK_SIZE,
1293                         .cra_ctxsize            = sizeof(struct atmel_sha_ctx),
1294                         .cra_alignmask          = 0,
1295                         .cra_module             = THIS_MODULE,
1296                         .cra_init               = atmel_sha_cra_init,
1297                 }
1298         }
1299 },
1300 };
1301 
1302 static struct ahash_alg sha_224_alg = {
1303         .init           = atmel_sha_init,
1304         .update         = atmel_sha_update,
1305         .final          = atmel_sha_final,
1306         .finup          = atmel_sha_finup,
1307         .digest         = atmel_sha_digest,
1308         .export         = atmel_sha_export,
1309         .import         = atmel_sha_import,
1310         .halg = {
1311                 .digestsize     = SHA224_DIGEST_SIZE,
1312                 .statesize      = sizeof(struct atmel_sha_reqctx),
1313                 .base   = {
1314                         .cra_name               = "sha224",
1315                         .cra_driver_name        = "atmel-sha224",
1316                         .cra_priority           = 100,
1317                         .cra_flags              = CRYPTO_ALG_ASYNC,
1318                         .cra_blocksize          = SHA224_BLOCK_SIZE,
1319                         .cra_ctxsize            = sizeof(struct atmel_sha_ctx),
1320                         .cra_alignmask          = 0,
1321                         .cra_module             = THIS_MODULE,
1322                         .cra_init               = atmel_sha_cra_init,
1323                 }
1324         }
1325 };
1326 
1327 static struct ahash_alg sha_384_512_algs[] = {
1328 {
1329         .init           = atmel_sha_init,
1330         .update         = atmel_sha_update,
1331         .final          = atmel_sha_final,
1332         .finup          = atmel_sha_finup,
1333         .digest         = atmel_sha_digest,
1334         .export         = atmel_sha_export,
1335         .import         = atmel_sha_import,
1336         .halg = {
1337                 .digestsize     = SHA384_DIGEST_SIZE,
1338                 .statesize      = sizeof(struct atmel_sha_reqctx),
1339                 .base   = {
1340                         .cra_name               = "sha384",
1341                         .cra_driver_name        = "atmel-sha384",
1342                         .cra_priority           = 100,
1343                         .cra_flags              = CRYPTO_ALG_ASYNC,
1344                         .cra_blocksize          = SHA384_BLOCK_SIZE,
1345                         .cra_ctxsize            = sizeof(struct atmel_sha_ctx),
1346                         .cra_alignmask          = 0x3,
1347                         .cra_module             = THIS_MODULE,
1348                         .cra_init               = atmel_sha_cra_init,
1349                 }
1350         }
1351 },
1352 {
1353         .init           = atmel_sha_init,
1354         .update         = atmel_sha_update,
1355         .final          = atmel_sha_final,
1356         .finup          = atmel_sha_finup,
1357         .digest         = atmel_sha_digest,
1358         .export         = atmel_sha_export,
1359         .import         = atmel_sha_import,
1360         .halg = {
1361                 .digestsize     = SHA512_DIGEST_SIZE,
1362                 .statesize      = sizeof(struct atmel_sha_reqctx),
1363                 .base   = {
1364                         .cra_name               = "sha512",
1365                         .cra_driver_name        = "atmel-sha512",
1366                         .cra_priority           = 100,
1367                         .cra_flags              = CRYPTO_ALG_ASYNC,
1368                         .cra_blocksize          = SHA512_BLOCK_SIZE,
1369                         .cra_ctxsize            = sizeof(struct atmel_sha_ctx),
1370                         .cra_alignmask          = 0x3,
1371                         .cra_module             = THIS_MODULE,
1372                         .cra_init               = atmel_sha_cra_init,
1373                 }
1374         }
1375 },
1376 };
1377 
1378 static void atmel_sha_queue_task(unsigned long data)
1379 {
1380         struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
1381 
1382         atmel_sha_handle_queue(dd, NULL);
1383 }
1384 
1385 static int atmel_sha_done(struct atmel_sha_dev *dd)
1386 {
1387         int err = 0;
1388 
1389         if (SHA_FLAGS_CPU & dd->flags) {
1390                 if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
1391                         dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
1392                         goto finish;
1393                 }
1394         } else if (SHA_FLAGS_DMA_READY & dd->flags) {
1395                 if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
1396                         dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
1397                         atmel_sha_update_dma_stop(dd);
1398                         if (dd->err) {
1399                                 err = dd->err;
1400                                 goto finish;
1401                         }
1402                 }
1403                 if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
1404                         /* hash or semi-hash ready */
1405                         dd->flags &= ~(SHA_FLAGS_DMA_READY |
1406                                                 SHA_FLAGS_OUTPUT_READY);
1407                         err = atmel_sha_update_dma_start(dd);
1408                         if (err != -EINPROGRESS)
1409                                 goto finish;
1410                 }
1411         }
1412         return err;
1413 
1414 finish:
 1415         /* finish current request */
1416         atmel_sha_finish_req(dd->req, err);
1417 
1418         return err;
1419 }
1420 
1421 static void atmel_sha_done_task(unsigned long data)
1422 {
1423         struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
1424 
1425         dd->is_async = true;
1426         (void)dd->resume(dd);
1427 }
1428 
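      /*
       * Interrupt handler: mask the sources that fired by writing SHA_IDR,
       * record that output (and, for DMA transfers, the DMA) is ready and
       * defer the real work to the done_task tasklet.
       */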
1429 static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
1430 {
1431         struct atmel_sha_dev *sha_dd = dev_id;
1432         u32 reg;
1433 
1434         reg = atmel_sha_read(sha_dd, SHA_ISR);
1435         if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
1436                 atmel_sha_write(sha_dd, SHA_IDR, reg);
1437                 if (SHA_FLAGS_BUSY & sha_dd->flags) {
1438                         sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
1439                         if (!(SHA_FLAGS_CPU & sha_dd->flags))
1440                                 sha_dd->flags |= SHA_FLAGS_DMA_READY;
1441                         tasklet_schedule(&sha_dd->done_task);
1442                 } else {
1443                         dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
1444                 }
1445                 return IRQ_HANDLED;
1446         }
1447 
1448         return IRQ_NONE;
1449 }
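
/*
 * The handler above follows the usual top-half/bottom-half split for a
 * shared interrupt line: acknowledge and mask the sources it handles
 * (SHA_IDR), record the hardware state in dd->flags, and defer the real
 * work to the done_task tasklet.  A minimal sketch of the same pattern,
 * with hypothetical foo_* names standing in for the driver specifics
 * (IRQ_NONE tells the core the shared line was raised by someone else):
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *fd = dev_id;
 *		u32 pending = foo_read(fd, FOO_ISR) & foo_read(fd, FOO_IMR);
 *
 *		if (!pending)
 *			return IRQ_NONE;
 *
 *		foo_write(fd, FOO_IDR, pending);
 *		tasklet_schedule(&fd->done_task);
 *		return IRQ_HANDLED;
 *	}
 */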
1450 
1451 
1452 /* DMA transfer functions */
1453 
1454 static bool atmel_sha_dma_check_aligned(struct atmel_sha_dev *dd,
1455                                         struct scatterlist *sg,
1456                                         size_t len)
1457 {
1458         struct atmel_sha_dma *dma = &dd->dma_lch_in;
1459         struct ahash_request *req = dd->req;
1460         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1461         size_t bs = ctx->block_size;
1462         int nents;
1463 
1464         for (nents = 0; sg; sg = sg_next(sg), ++nents) {
1465                 if (!IS_ALIGNED(sg->offset, sizeof(u32)))
1466                         return false;
1467 
1468                 /*
1469                  * This is the last sg, the only one that is allowed to
1470                  * have an unaligned length.
1471                  */
1472                 if (len <= sg->length) {
1473                         dma->nents = nents + 1;
1474                         dma->last_sg_length = sg->length;
1475                         sg->length = ALIGN(len, sizeof(u32));
1476                         return true;
1477                 }
1478 
1479                 /* All other sg lengths MUST be aligned to the block size. */
1480                 if (!IS_ALIGNED(sg->length, bs))
1481                         return false;
1482 
1483                 len -= sg->length;
1484         }
1485 
1486         return false;
1487 }
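
/*
 * Worked example of the alignment contract checked above: with a 64-byte
 * (SHA-256) block size and len = 222, a list of three word-aligned
 * segments of 128, 64 and 30 bytes is accepted.  The first two are
 * block-aligned, and the last one (222 - 128 - 64 = 30 bytes) has its
 * length padded to ALIGN(30, 4) = 32 so the DMA engine only ever moves
 * whole 32-bit words; dma->last_sg_length remembers the true 30-byte
 * length so the completion callback can restore it.  A segment list of
 * {100, 64, 58} would be rejected instead, since 100 is not a multiple
 * of the 64-byte block size.
 */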
1488 
1489 static void atmel_sha_dma_callback2(void *data)
1490 {
1491         struct atmel_sha_dev *dd = data;
1492         struct atmel_sha_dma *dma = &dd->dma_lch_in;
1493         struct scatterlist *sg;
1494         int nents;
1495 
1496         dmaengine_terminate_all(dma->chan);
1497         dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
1498 
1499         sg = dma->sg;
1500         for (nents = 0; nents < dma->nents - 1; ++nents)
1501                 sg = sg_next(sg);
1502         sg->length = dma->last_sg_length;
1503 
1504         dd->is_async = true;
1505         (void)atmel_sha_wait_for_data_ready(dd, dd->resume);
1506 }
1507 
1508 static int atmel_sha_dma_start(struct atmel_sha_dev *dd,
1509                                struct scatterlist *src,
1510                                size_t len,
1511                                atmel_sha_fn_t resume)
1512 {
1513         struct atmel_sha_dma *dma = &dd->dma_lch_in;
1514         struct dma_slave_config *config = &dma->dma_conf;
1515         struct dma_chan *chan = dma->chan;
1516         struct dma_async_tx_descriptor *desc;
1517         dma_cookie_t cookie;
1518         unsigned int sg_len;
1519         int err;
1520 
1521         dd->resume = resume;
1522 
1523         /*
1524          * dma->nents has already been initialized by
1525          * atmel_sha_dma_check_aligned().
1526          */
1527         dma->sg = src;
1528         sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
1529         if (!sg_len) {
1530                 err = -ENOMEM;
1531                 goto exit;
1532         }
1533 
1534         config->src_maxburst = 16;
1535         config->dst_maxburst = 16;
1536         err = dmaengine_slave_config(chan, config);
1537         if (err)
1538                 goto unmap_sg;
1539 
1540         desc = dmaengine_prep_slave_sg(chan, dma->sg, sg_len, DMA_MEM_TO_DEV,
1541                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1542         if (!desc) {
1543                 err = -ENOMEM;
1544                 goto unmap_sg;
1545         }
1546 
1547         desc->callback = atmel_sha_dma_callback2;
1548         desc->callback_param = dd;
1549         cookie = dmaengine_submit(desc);
1550         err = dma_submit_error(cookie);
1551         if (err)
1552                 goto unmap_sg;
1553 
1554         dma_async_issue_pending(chan);
1555 
1556         return -EINPROGRESS;
1557 
1558 unmap_sg:
1559         dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
1560 exit:
1561         return atmel_sha_complete(dd, err);
1562 }
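
/*
 * atmel_sha_dma_start() follows the canonical dmaengine slave sequence:
 * dma_map_sg(), dmaengine_slave_config(), dmaengine_prep_slave_sg(),
 * set the completion callback, dmaengine_submit(), then
 * dma_async_issue_pending().  On success it returns -EINPROGRESS and
 * the request completes later from atmel_sha_dma_callback2(); any
 * failure unwinds the mapping and finishes the request through
 * atmel_sha_complete().
 */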
1563 
1564 
1565 /* CPU transfer functions */
1566 
1567 static int atmel_sha_cpu_transfer(struct atmel_sha_dev *dd)
1568 {
1569         struct ahash_request *req = dd->req;
1570         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1571         const u32 *words = (const u32 *)ctx->buffer;
1572         size_t i, num_words;
1573         u32 isr, din, din_inc;
1574 
1575         din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1;
1576         for (;;) {
1577                 /* Write data into the Input Data Registers. */
1578                 num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32));
1579                 for (i = 0, din = 0; i < num_words; ++i, din += din_inc)
1580                         atmel_sha_write(dd, SHA_REG_DIN(din), words[i]);
1581 
1582                 ctx->offset += ctx->bufcnt;
1583                 ctx->total -= ctx->bufcnt;
1584 
1585                 if (!ctx->total)
1586                         break;
1587 
1588                 /*
1589                  * Prepare the next block: copy the next chunk of data
1590                  * into ctx->buffer now, while the SHA hardware is still
1591                  * processing the current block.  This overlap gives the
1592                  * hardware time to finish, so SHA_INT_DATARDY may already
1593                  * be set in SHA_ISR when the register is polled at the
1594                  * start of the next loop iteration.
1595                  */
1596                 ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
1597                 scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
1598                                          ctx->offset, ctx->bufcnt, 0);
1599 
1600                 /* Wait for hardware to be ready again. */
1601                 isr = atmel_sha_read(dd, SHA_ISR);
1602                 if (!(isr & SHA_INT_DATARDY)) {
1603                         /* Not ready yet. */
1604                         dd->resume = atmel_sha_cpu_transfer;
1605                         atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
1606                         return -EINPROGRESS;
1607                 }
1608         }
1609 
1610         if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY)))
1611                 return dd->cpu_transfer_complete(dd);
1612 
1613         return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete);
1614 }
1615 
1616 static int atmel_sha_cpu_start(struct atmel_sha_dev *dd,
1617                                struct scatterlist *sg,
1618                                unsigned int len,
1619                                bool idatar0_only,
1620                                bool wait_data_ready,
1621                                atmel_sha_fn_t resume)
1622 {
1623         struct ahash_request *req = dd->req;
1624         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1625 
1626         if (!len)
1627                 return resume(dd);
1628 
1629         ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY);
1630 
1631         if (idatar0_only)
1632                 ctx->flags |= SHA_FLAGS_IDATAR0;
1633 
1634         if (wait_data_ready)
1635                 ctx->flags |= SHA_FLAGS_WAIT_DATARDY;
1636 
1637         ctx->sg = sg;
1638         ctx->total = len;
1639         ctx->offset = 0;
1640 
1641         /* Prepare the first block to be written. */
1642         ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total);
1643         scatterwalk_map_and_copy(ctx->buffer, ctx->sg,
1644                                  ctx->offset, ctx->bufcnt, 0);
1645 
1646         dd->cpu_transfer_complete = resume;
1647         return atmel_sha_cpu_transfer(dd);
1648 }
1649 
1650 static int atmel_sha_cpu_hash(struct atmel_sha_dev *dd,
1651                               const void *data, unsigned int datalen,
1652                               bool auto_padding,
1653                               atmel_sha_fn_t resume)
1654 {
1655         struct ahash_request *req = dd->req;
1656         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1657         u32 msglen = (auto_padding) ? datalen : 0;
1658         u32 mr = SHA_MR_MODE_AUTO;
1659 
1660         if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding))
1661                 return atmel_sha_complete(dd, -EINVAL);
1662 
1663         mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
1664         atmel_sha_write(dd, SHA_MR, mr);
1665         atmel_sha_write(dd, SHA_MSR, msglen);
1666         atmel_sha_write(dd, SHA_BCR, msglen);
1667         atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
1668 
1669         sg_init_one(&dd->tmp, data, datalen);
1670         return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume);
1671 }
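
/*
 * atmel_sha_cpu_hash() drives a complete hash over a kernel buffer:
 * with auto_padding the hardware appends the SHA padding itself
 * (SHA_MSR and SHA_BCR are programmed with the real message length);
 * otherwise datalen must be a whole number of blocks and both registers
 * stay 0.  The HMAC setup code below uses both variants: auto padding
 * to pre-hash an over-long key, no padding to absorb the single
 * ipad/opad blocks.
 */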
1672 
1673 
1674 /* hmac functions */
1675 
1676 struct atmel_sha_hmac_key {
1677         bool                    valid;
1678         unsigned int            keylen;
1679         u8                      buffer[SHA512_BLOCK_SIZE];
1680         u8                      *keydup;
1681 };
1682 
1683 static inline void atmel_sha_hmac_key_init(struct atmel_sha_hmac_key *hkey)
1684 {
1685         memset(hkey, 0, sizeof(*hkey));
1686 }
1687 
1688 static inline void atmel_sha_hmac_key_release(struct atmel_sha_hmac_key *hkey)
1689 {
1690         kfree(hkey->keydup);
1691         memset(hkey, 0, sizeof(*hkey));
1692 }
1693 
1694 static inline int atmel_sha_hmac_key_set(struct atmel_sha_hmac_key *hkey,
1695                                          const u8 *key,
1696                                          unsigned int keylen)
1697 {
1698         atmel_sha_hmac_key_release(hkey);
1699 
1700         if (keylen > sizeof(hkey->buffer)) {
1701                 hkey->keydup = kmemdup(key, keylen, GFP_KERNEL);
1702                 if (!hkey->keydup)
1703                         return -ENOMEM;
1704 
1705         } else {
1706                 memcpy(hkey->buffer, key, keylen);
1707         }
1708 
1709         hkey->valid = true;
1710         hkey->keylen = keylen;
1711         return 0;
1712 }
1713 
1714 static inline bool atmel_sha_hmac_key_get(const struct atmel_sha_hmac_key *hkey,
1715                                           const u8 **key,
1716                                           unsigned int *keylen)
1717 {
1718         if (!hkey->valid)
1719                 return false;
1720 
1721         *keylen = hkey->keylen;
1722         *key = (hkey->keydup) ? hkey->keydup : hkey->buffer;
1723         return true;
1724 }
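
/*
 * Key storage strategy: keys up to SHA512_BLOCK_SIZE (128) bytes are
 * kept inline in hkey->buffer, longer ones are duplicated with
 * kmemdup().  As RFC 2104 requires, a key longer than the block size is
 * later reduced by hashing it first (atmel_sha_hmac_prehash_key()), so
 * K' never exceeds one block.
 */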
1725 
1726 
1727 struct atmel_sha_hmac_ctx {
1728         struct atmel_sha_ctx    base;
1729 
1730         struct atmel_sha_hmac_key       hkey;
1731         u32                     ipad[SHA512_BLOCK_SIZE / sizeof(u32)];
1732         u32                     opad[SHA512_BLOCK_SIZE / sizeof(u32)];
1733         atmel_sha_fn_t          resume;
1734 };
1735 
1736 static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
1737                                 atmel_sha_fn_t resume);
1738 static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
1739                                       const u8 *key, unsigned int keylen);
1740 static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd);
1741 static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd);
1742 static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd);
1743 static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd);
1744 
1745 static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd);
1746 static int atmel_sha_hmac_final(struct atmel_sha_dev *dd);
1747 static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd);
1748 static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd);
1749 
1750 static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd,
1751                                 atmel_sha_fn_t resume)
1752 {
1753         struct ahash_request *req = dd->req;
1754         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1755         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1756         struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1757         unsigned int keylen;
1758         const u8 *key;
1759         size_t bs;
1760 
1761         hmac->resume = resume;
1762         switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
1763         case SHA_FLAGS_SHA1:
1764                 ctx->block_size = SHA1_BLOCK_SIZE;
1765                 ctx->hash_size = SHA1_DIGEST_SIZE;
1766                 break;
1767 
1768         case SHA_FLAGS_SHA224:
1769                 ctx->block_size = SHA224_BLOCK_SIZE;
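                /* SHA-224 is truncated SHA-256: the internal hash state is 256 bits wide. */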
1770                 ctx->hash_size = SHA256_DIGEST_SIZE;
1771                 break;
1772 
1773         case SHA_FLAGS_SHA256:
1774                 ctx->block_size = SHA256_BLOCK_SIZE;
1775                 ctx->hash_size = SHA256_DIGEST_SIZE;
1776                 break;
1777 
1778         case SHA_FLAGS_SHA384:
1779                 ctx->block_size = SHA384_BLOCK_SIZE;
1780                 ctx->hash_size = SHA512_DIGEST_SIZE;
1781                 break;
1782 
1783         case SHA_FLAGS_SHA512:
1784                 ctx->block_size = SHA512_BLOCK_SIZE;
1785                 ctx->hash_size = SHA512_DIGEST_SIZE;
1786                 break;
1787 
1788         default:
1789                 return atmel_sha_complete(dd, -EINVAL);
1790         }
1791         bs = ctx->block_size;
1792 
1793         if (likely(!atmel_sha_hmac_key_get(&hmac->hkey, &key, &keylen)))
1794                 return resume(dd);
1795 
1796         /* Compute K' from K. */
1797         if (unlikely(keylen > bs))
1798                 return atmel_sha_hmac_prehash_key(dd, key, keylen);
1799 
1800         /* Prepare ipad. */
1801         memcpy((u8 *)hmac->ipad, key, keylen);
1802         memset((u8 *)hmac->ipad + keylen, 0, bs - keylen);
1803         return atmel_sha_hmac_compute_ipad_hash(dd);
1804 }
1805 
1806 static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd,
1807                                       const u8 *key, unsigned int keylen)
1808 {
1809         return atmel_sha_cpu_hash(dd, key, keylen, true,
1810                                   atmel_sha_hmac_prehash_key_done);
1811 }
1812 
1813 static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd)
1814 {
1815         struct ahash_request *req = dd->req;
1816         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1817         struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1818         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1819         size_t ds = crypto_ahash_digestsize(tfm);
1820         size_t bs = ctx->block_size;
1821         size_t i, num_words = ds / sizeof(u32);
1822 
1823         /* Prepare ipad. */
1824         for (i = 0; i < num_words; ++i)
1825                 hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
1826         memset((u8 *)hmac->ipad + ds, 0, bs - ds);
1827         return atmel_sha_hmac_compute_ipad_hash(dd);
1828 }
1829 
1830 static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd)
1831 {
1832         struct ahash_request *req = dd->req;
1833         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1834         struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1835         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1836         size_t bs = ctx->block_size;
1837         size_t i, num_words = bs / sizeof(u32);
1838 
1839         memcpy(hmac->opad, hmac->ipad, bs);
1840         for (i = 0; i < num_words; ++i) {
1841                 hmac->ipad[i] ^= 0x36363636;
1842                 hmac->opad[i] ^= 0x5c5c5c5c;
1843         }
1844 
1845         return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false,
1846                                   atmel_sha_hmac_compute_opad_hash);
1847 }
1848 
1849 static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd)
1850 {
1851         struct ahash_request *req = dd->req;
1852         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1853         struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1854         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1855         size_t bs = ctx->block_size;
1856         size_t hs = ctx->hash_size;
1857         size_t i, num_words = hs / sizeof(u32);
1858 
1859         for (i = 0; i < num_words; ++i)
1860                 hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
1861         return atmel_sha_cpu_hash(dd, hmac->opad, bs, false,
1862                                   atmel_sha_hmac_setup_done);
1863 }
1864 
1865 static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd)
1866 {
1867         struct ahash_request *req = dd->req;
1868         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1869         struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1870         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1871         size_t hs = ctx->hash_size;
1872         size_t i, num_words = hs / sizeof(u32);
1873 
1874         for (i = 0; i < num_words; ++i)
1875                 hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
1876         atmel_sha_hmac_key_release(&hmac->hkey);
1877         return hmac->resume(dd);
1878 }
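
/*
 * Summary of the setup path above, in RFC 2104 terms:
 *
 *	K'   = K zero-padded to the block size bs
 *	       (K is first replaced by H(K) whenever keylen > bs)
 *	HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 * with ipad = 0x36 and opad = 0x5c repeated bs times.  Only the two
 * intermediate hardware states H(K' ^ ipad) and H(K' ^ opad) are kept,
 * in hmac->ipad[] and hmac->opad[], so later requests can restart from
 * them without touching the key material again.
 */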
1879 
1880 static int atmel_sha_hmac_start(struct atmel_sha_dev *dd)
1881 {
1882         struct ahash_request *req = dd->req;
1883         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1884         int err;
1885 
1886         err = atmel_sha_hw_init(dd);
1887         if (err)
1888                 return atmel_sha_complete(dd, err);
1889 
1890         switch (ctx->op) {
1891         case SHA_OP_INIT:
1892                 err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_init_done);
1893                 break;
1894 
1895         case SHA_OP_UPDATE:
1896                 dd->resume = atmel_sha_done;
1897                 err = atmel_sha_update_req(dd);
1898                 break;
1899 
1900         case SHA_OP_FINAL:
1901                 dd->resume = atmel_sha_hmac_final;
1902                 err = atmel_sha_final_req(dd);
1903                 break;
1904 
1905         case SHA_OP_DIGEST:
1906                 err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_digest2);
1907                 break;
1908 
1909         default:
1910                 return atmel_sha_complete(dd, -EINVAL);
1911         }
1912 
1913         return err;
1914 }
1915 
1916 static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
1917                                  unsigned int keylen)
1918 {
1919         struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1920 
1921         return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen);
1922 }
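
/*
 * A minimal sketch of how a kernel user reaches this setkey through the
 * generic ahash API, assuming this driver wins the priority-based
 * algorithm selection for "hmac(sha256)" (error handling elided):
 *
 *	struct crypto_ahash *tfm;
 *
 *	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	...
 *	crypto_free_ahash(tfm);
 */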
1923 
1924 static int atmel_sha_hmac_init(struct ahash_request *req)
1925 {
1926         int err;
1927 
1928         err = atmel_sha_init(req);
1929         if (err)
1930                 return err;
1931 
1932         return atmel_sha_enqueue(req, SHA_OP_INIT);
1933 }
1934 
1935 static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd)
1936 {
1937         struct ahash_request *req = dd->req;
1938         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1939         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1940         struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1941         size_t bs = ctx->block_size;
1942         size_t hs = ctx->hash_size;
1943 
1944         ctx->bufcnt = 0;
1945         ctx->digcnt[0] = bs;
1946         ctx->digcnt[1] = 0;
1947         ctx->flags |= SHA_FLAGS_RESTORE;
1948         memcpy(ctx->digest, hmac->ipad, hs);
1949         return atmel_sha_complete(dd, 0);
1950 }
1951 
1952 static int atmel_sha_hmac_final(struct atmel_sha_dev *dd)
1953 {
1954         struct ahash_request *req = dd->req;
1955         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
1956         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1957         struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
1958         u32 *digest = (u32 *)ctx->digest;
1959         size_t ds = crypto_ahash_digestsize(tfm);
1960         size_t bs = ctx->block_size;
1961         size_t hs = ctx->hash_size;
1962         size_t i, num_words;
1963         u32 mr;
1964 
1965         /* Save d = SHA((K' + ipad) | msg). */
1966         num_words = ds / sizeof(u32);
1967         for (i = 0; i < num_words; ++i)
1968                 digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
1969 
1970         /* Restore context to finish computing SHA((K' + opad) | d). */
1971         atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
1972         num_words = hs / sizeof(u32);
1973         for (i = 0; i < num_words; ++i)
1974                 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
1975 
1976         mr = SHA_MR_MODE_AUTO | SHA_MR_UIHV;
1977         mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK);
1978         atmel_sha_write(dd, SHA_MR, mr);
1979         atmel_sha_write(dd, SHA_MSR, bs + ds);
1980         atmel_sha_write(dd, SHA_BCR, ds);
1981         atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
1982 
1983         sg_init_one(&dd->tmp, digest, ds);
1984         return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true,
1985                                    atmel_sha_hmac_final_done);
1986 }
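
/*
 * Note on the lengths programmed above: the outer hash logically covers
 * (K' ^ opad) || d, i.e. one bs-byte block plus the ds-byte inner
 * digest, so SHA_MSR = bs + ds for the automatic padding computation.
 * The opad block itself is injected as a precomputed internal state via
 * SHA_CR_WUIHV, so only the ds digest bytes are actually transferred,
 * hence SHA_BCR = ds.
 */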
1987 
1988 static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd)
1989 {
1990         /*
1991          * req->result might not be sizeof(u32) aligned, so copy the
1992          * digest into ctx->digest[] before memcpy() the data into
1993          * req->result.
1994          */
1995         atmel_sha_copy_hash(dd->req);
1996         atmel_sha_copy_ready_hash(dd->req);
1997         return atmel_sha_complete(dd, 0);
1998 }
1999 
2000 static int atmel_sha_hmac_digest(struct ahash_request *req)
2001 {
2002         int err;
2003 
2004         err = atmel_sha_init(req);
2005         if (err)
2006                 return err;
2007 
2008         return atmel_sha_enqueue(req, SHA_OP_DIGEST);
2009 }
2010 
2011 static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd)
2012 {
2013         struct ahash_request *req = dd->req;
2014         struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
2015         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2016         struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
2017         size_t hs = ctx->hash_size;
2018         size_t i, num_words = hs / sizeof(u32);
2019         bool use_dma = false;
2020         u32 mr;
2021 
2022         /* Special case for empty message. */
2023         if (!req->nbytes)
2024                 return atmel_sha_complete(dd, -EINVAL); // TODO:
2025 
2026         /* Check DMA threshold and alignment. */
2027         if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD &&
2028             atmel_sha_dma_check_aligned(dd, req->src, req->nbytes))
2029                 use_dma = true;
2030 
2031         /* Write both initial hash values to compute a HMAC. */
2032         atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
2033         for (i = 0; i < num_words; ++i)
2034                 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);
2035 
2036         atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
2037         for (i = 0; i < num_words; ++i)
2038                 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
2039 
2040         /* Write the Mode, Message Size, Bytes Count then Control Registers. */
2041         mr = (SHA_MR_HMAC | SHA_MR_DUALBUFF);
2042         mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
2043         if (use_dma)
2044                 mr |= SHA_MR_MODE_IDATAR0;
2045         else
2046                 mr |= SHA_MR_MODE_AUTO;
2047         atmel_sha_write(dd, SHA_MR, mr);
2048 
2049         atmel_sha_write(dd, SHA_MSR, req->nbytes);
2050         atmel_sha_write(dd, SHA_BCR, req->nbytes);
2051 
2052         atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
2053 
2054         /* Process data. */
2055         if (use_dma)
2056                 return atmel_sha_dma_start(dd, req->src, req->nbytes,
2057                                            atmel_sha_hmac_final_done);
2058 
2059         return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true,
2060                                    atmel_sha_hmac_final_done);
2061 }
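
/*
 * atmel_sha_hmac_digest2() lets the hardware compute the whole HMAC in
 * a single pass: SHA_CR_WUIHV and SHA_CR_WUIEHV load the precomputed
 * H(K' ^ ipad) and H(K' ^ opad) states into the two sets of initial
 * hash value registers, and SHA_MR_HMAC makes the IP chain the inner
 * and outer hashes itself, fed either by DMA (IDATAR0 mode) or by CPU
 * writes (auto mode) depending on the size and alignment of the request.
 */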
2062 
2063 static int atmel_sha_hmac_cra_init(struct crypto_tfm *tfm)
2064 {
2065         struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);
2066 
2067         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2068                                  sizeof(struct atmel_sha_reqctx));
2069         hmac->base.start = atmel_sha_hmac_start;
2070         atmel_sha_hmac_key_init(&hmac->hkey);
2071 
2072         return 0;
2073 }
2074 
2075 static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm)
2076 {
2077         struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm);
2078 
2079         atmel_sha_hmac_key_release(&hmac->hkey);
2080 }
2081 
2082 static struct ahash_alg sha_hmac_algs[] = {
2083 {
2084         .init           = atmel_sha_hmac_init,
2085         .update         = atmel_sha_update,
2086         .final          = atmel_sha_final,
2087         .digest         = atmel_sha_hmac_digest,
2088         .setkey         = atmel_sha_hmac_setkey,
2089         .export         = atmel_sha_export,
2090         .import         = atmel_sha_import,
2091         .halg = {
2092                 .digestsize     = SHA1_DIGEST_SIZE,
2093                 .statesize      = sizeof(struct atmel_sha_reqctx),
2094                 .base   = {
2095                         .cra_name               = "hmac(sha1)",
2096                         .cra_driver_name        = "atmel-hmac-sha1",
2097                         .cra_priority           = 100,
2098                         .cra_flags              = CRYPTO_ALG_ASYNC,
2099                         .cra_blocksize          = SHA1_BLOCK_SIZE,
2100                         .cra_ctxsize            = sizeof(struct atmel_sha_hmac_ctx),
2101                         .cra_alignmask          = 0,
2102                         .cra_module             = THIS_MODULE,
2103                         .cra_init               = atmel_sha_hmac_cra_init,
2104                         .cra_exit               = atmel_sha_hmac_cra_exit,
2105                 }
2106         }
2107 },
2108 {
2109         .init           = atmel_sha_hmac_init,
2110         .update         = atmel_sha_update,
2111         .final          = atmel_sha_final,
2112         .digest         = atmel_sha_hmac_digest,
2113         .setkey         = atmel_sha_hmac_setkey,
2114         .export         = atmel_sha_export,
2115         .import         = atmel_sha_import,
2116         .halg = {
2117                 .digestsize     = SHA224_DIGEST_SIZE,
2118                 .statesize      = sizeof(struct atmel_sha_reqctx),
2119                 .base   = {
2120                         .cra_name               = "hmac(sha224)",
2121                         .cra_driver_name        = "atmel-hmac-sha224",
2122                         .cra_priority           = 100,
2123                         .cra_flags              = CRYPTO_ALG_ASYNC,
2124                         .cra_blocksize          = SHA224_BLOCK_SIZE,
2125                         .cra_ctxsize            = sizeof(struct atmel_sha_hmac_ctx),
2126                         .cra_alignmask          = 0,
2127                         .cra_module             = THIS_MODULE,
2128                         .cra_init               = atmel_sha_hmac_cra_init,
2129                         .cra_exit               = atmel_sha_hmac_cra_exit,
2130                 }
2131         }
2132 },
2133 {
2134         .init           = atmel_sha_hmac_init,
2135         .update         = atmel_sha_update,
2136         .final          = atmel_sha_final,
2137         .digest         = atmel_sha_hmac_digest,
2138         .setkey         = atmel_sha_hmac_setkey,
2139         .export         = atmel_sha_export,
2140         .import         = atmel_sha_import,
2141         .halg = {
2142                 .digestsize     = SHA256_DIGEST_SIZE,
2143                 .statesize      = sizeof(struct atmel_sha_reqctx),
2144                 .base   = {
2145                         .cra_name               = "hmac(sha256)",
2146                         .cra_driver_name        = "atmel-hmac-sha256",
2147                         .cra_priority           = 100,
2148                         .cra_flags              = CRYPTO_ALG_ASYNC,
2149                         .cra_blocksize          = SHA256_BLOCK_SIZE,
2150                         .cra_ctxsize            = sizeof(struct atmel_sha_hmac_ctx),
2151                         .cra_alignmask          = 0,
2152                         .cra_module             = THIS_MODULE,
2153                         .cra_init               = atmel_sha_hmac_cra_init,
2154                         .cra_exit               = atmel_sha_hmac_cra_exit,
2155                 }
2156         }
2157 },
2158 {
2159         .init           = atmel_sha_hmac_init,
2160         .update         = atmel_sha_update,
2161         .final          = atmel_sha_final,
2162         .digest         = atmel_sha_hmac_digest,
2163         .setkey         = atmel_sha_hmac_setkey,
2164         .export         = atmel_sha_export,
2165         .import         = atmel_sha_import,
2166         .halg = {
2167                 .digestsize     = SHA384_DIGEST_SIZE,
2168                 .statesize      = sizeof(struct atmel_sha_reqctx),
2169                 .base   = {
2170                         .cra_name               = "hmac(sha384)",
2171                         .cra_driver_name        = "atmel-hmac-sha384",
2172                         .cra_priority           = 100,
2173                         .cra_flags              = CRYPTO_ALG_ASYNC,
2174                         .cra_blocksize          = SHA384_BLOCK_SIZE,
2175                         .cra_ctxsize            = sizeof(struct atmel_sha_hmac_ctx),
2176                         .cra_alignmask          = 0,
2177                         .cra_module             = THIS_MODULE,
2178                         .cra_init               = atmel_sha_hmac_cra_init,
2179                         .cra_exit               = atmel_sha_hmac_cra_exit,
2180                 }
2181         }
2182 },
2183 {
2184         .init           = atmel_sha_hmac_init,
2185         .update         = atmel_sha_update,
2186         .final          = atmel_sha_final,
2187         .digest         = atmel_sha_hmac_digest,
2188         .setkey         = atmel_sha_hmac_setkey,
2189         .export         = atmel_sha_export,
2190         .import         = atmel_sha_import,
2191         .halg = {
2192                 .digestsize     = SHA512_DIGEST_SIZE,
2193                 .statesize      = sizeof(struct atmel_sha_reqctx),
2194                 .base   = {
2195                         .cra_name               = "hmac(sha512)",
2196                         .cra_driver_name        = "atmel-hmac-sha512",
2197                         .cra_priority           = 100,
2198                         .cra_flags              = CRYPTO_ALG_ASYNC,
2199                         .cra_blocksize          = SHA512_BLOCK_SIZE,
2200                         .cra_ctxsize            = sizeof(struct atmel_sha_hmac_ctx),
2201                         .cra_alignmask          = 0,
2202                         .cra_module             = THIS_MODULE,
2203                         .cra_init               = atmel_sha_hmac_cra_init,
2204                         .cra_exit               = atmel_sha_hmac_cra_exit,
2205                 }
2206         }
2207 },
2208 };
2209 
2210 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
2211 /* authenc functions */
2212 
2213 static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd);
2214 static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd);
2215 static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd);
2216 
2217 
2218 struct atmel_sha_authenc_ctx {
2219         struct crypto_ahash     *tfm;
2220 };
2221 
2222 struct atmel_sha_authenc_reqctx {
2223         struct atmel_sha_reqctx base;
2224 
2225         atmel_aes_authenc_fn_t  cb;
2226         struct atmel_aes_dev    *aes_dev;
2227 
2228         /* _init() parameters. */
2229         struct scatterlist      *assoc;
2230         u32                     assoclen;
2231         u32                     textlen;
2232 
2233         /* _final() parameters. */
2234         u32                     *digest;
2235         unsigned int            digestlen;
2236 };
2237 
2238 static void atmel_sha_authenc_complete(struct crypto_async_request *areq,
2239                                        int err)
2240 {
2241         struct ahash_request *req = areq->data;
2242         struct atmel_sha_authenc_reqctx *authctx  = ahash_request_ctx(req);
2243 
2244         authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async);
2245 }
2246 
2247 static int atmel_sha_authenc_start(struct atmel_sha_dev *dd)
2248 {
2249         struct ahash_request *req = dd->req;
2250         struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2251         int err;
2252 
2253         /*
2254          * Force atmel_sha_complete() to call req->base.complete(), i.e.
2255          * atmel_sha_authenc_complete(), which in turn calls authctx->cb().
2256          */
2257         dd->force_complete = true;
2258 
2259         err = atmel_sha_hw_init(dd);
2260         return authctx->cb(authctx->aes_dev, err, dd->is_async);
2261 }
2262 
2263 bool atmel_sha_authenc_is_ready(void)
2264 {
2265         struct atmel_sha_ctx dummy;
2266 
2267         dummy.dd = NULL;
2268         return (atmel_sha_find_dev(&dummy) != NULL);
2269 }
2270 EXPORT_SYMBOL_GPL(atmel_sha_authenc_is_ready);
2271 
2272 unsigned int atmel_sha_authenc_get_reqsize(void)
2273 {
2274         return sizeof(struct atmel_sha_authenc_reqctx);
2275 }
2276 EXPORT_SYMBOL_GPL(atmel_sha_authenc_get_reqsize);
2277 
2278 struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode)
2279 {
2280         struct atmel_sha_authenc_ctx *auth;
2281         struct crypto_ahash *tfm;
2282         struct atmel_sha_ctx *tctx;
2283         const char *name;
2284         int err = -EINVAL;
2285 
2286         switch (mode & SHA_FLAGS_MODE_MASK) {
2287         case SHA_FLAGS_HMAC_SHA1:
2288                 name = "atmel-hmac-sha1";
2289                 break;
2290 
2291         case SHA_FLAGS_HMAC_SHA224:
2292                 name = "atmel-hmac-sha224";
2293                 break;
2294 
2295         case SHA_FLAGS_HMAC_SHA256:
2296                 name = "atmel-hmac-sha256";
2297                 break;
2298 
2299         case SHA_FLAGS_HMAC_SHA384:
2300                 name = "atmel-hmac-sha384";
2301                 break;
2302 
2303         case SHA_FLAGS_HMAC_SHA512:
2304                 name = "atmel-hmac-sha512";
2305                 break;
2306 
2307         default:
2308                 goto error;
2309         }
2310 
2311         tfm = crypto_alloc_ahash(name, 0, 0);
2312         if (IS_ERR(tfm)) {
2313                 err = PTR_ERR(tfm);
2314                 goto error;
2315         }
2316         tctx = crypto_ahash_ctx(tfm);
2317         tctx->start = atmel_sha_authenc_start;
2318         tctx->flags = mode;
2319 
2320         auth = kzalloc(sizeof(*auth), GFP_KERNEL);
2321         if (!auth) {
2322                 err = -ENOMEM;
2323                 goto err_free_ahash;
2324         }
2325         auth->tfm = tfm;
2326 
2327         return auth;
2328 
2329 err_free_ahash:
2330         crypto_free_ahash(tfm);
2331 error:
2332         return ERR_PTR(err);
2333 }
2334 EXPORT_SYMBOL_GPL(atmel_sha_authenc_spawn);
2335 
2336 void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth)
2337 {
2338         if (auth)
2339                 crypto_free_ahash(auth->tfm);
2340         kfree(auth);
2341 }
2342 EXPORT_SYMBOL_GPL(atmel_sha_authenc_free);
2343 
2344 int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth,
2345                              const u8 *key, unsigned int keylen,
2346                              u32 *flags)
2347 {
2348         struct crypto_ahash *tfm = auth->tfm;
2349         int err;
2350 
2351         crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
2352         crypto_ahash_set_flags(tfm, *flags & CRYPTO_TFM_REQ_MASK);
2353         err = crypto_ahash_setkey(tfm, key, keylen);
2354         *flags = crypto_ahash_get_flags(tfm);
2355 
2356         return err;
2357 }
2358 EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey);
2359 
2360 int atmel_sha_authenc_schedule(struct ahash_request *req,
2361                                struct atmel_sha_authenc_ctx *auth,
2362                                atmel_aes_authenc_fn_t cb,
2363                                struct atmel_aes_dev *aes_dev)
2364 {
2365         struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2366         struct atmel_sha_reqctx *ctx = &authctx->base;
2367         struct crypto_ahash *tfm = auth->tfm;
2368         struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
2369         struct atmel_sha_dev *dd;
2370 
2371         /* Reset request context (MUST be done first). */
2372         memset(authctx, 0, sizeof(*authctx));
2373 
2374         /* Get SHA device. */
2375         dd = atmel_sha_find_dev(tctx);
2376         if (!dd)
2377                 return cb(aes_dev, -ENODEV, false);
2378 
2379         /* Init request context. */
2380         ctx->dd = dd;
2381         ctx->buflen = SHA_BUFFER_LEN;
2382         authctx->cb = cb;
2383         authctx->aes_dev = aes_dev;
2384         ahash_request_set_tfm(req, tfm);
2385         ahash_request_set_callback(req, 0, atmel_sha_authenc_complete, req);
2386 
2387         return atmel_sha_handle_queue(dd, req);
2388 }
2389 EXPORT_SYMBOL_GPL(atmel_sha_authenc_schedule);
2390 
2391 int atmel_sha_authenc_init(struct ahash_request *req,
2392                            struct scatterlist *assoc, unsigned int assoclen,
2393                            unsigned int textlen,
2394                            atmel_aes_authenc_fn_t cb,
2395                            struct atmel_aes_dev *aes_dev)
2396 {
2397         struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2398         struct atmel_sha_reqctx *ctx = &authctx->base;
2399         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2400         struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
2401         struct atmel_sha_dev *dd = ctx->dd;
2402 
2403         if (unlikely(!IS_ALIGNED(assoclen, sizeof(u32))))
2404                 return atmel_sha_complete(dd, -EINVAL);
2405 
2406         authctx->cb = cb;
2407         authctx->aes_dev = aes_dev;
2408         authctx->assoc = assoc;
2409         authctx->assoclen = assoclen;
2410         authctx->textlen = textlen;
2411 
2412         ctx->flags = hmac->base.flags;
2413         return atmel_sha_hmac_setup(dd, atmel_sha_authenc_init2);
2414 }
2415 EXPORT_SYMBOL_GPL(atmel_sha_authenc_init);
2416 
2417 static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd)
2418 {
2419         struct ahash_request *req = dd->req;
2420         struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2421         struct atmel_sha_reqctx *ctx = &authctx->base;
2422         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2423         struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm);
2424         size_t hs = ctx->hash_size;
2425         size_t i, num_words = hs / sizeof(u32);
2426         u32 mr, msg_size;
2427 
2428         atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
2429         for (i = 0; i < num_words; ++i)
2430                 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]);
2431 
2432         atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV);
2433         for (i = 0; i < num_words; ++i)
2434                 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]);
2435 
2436         mr = (SHA_MR_MODE_IDATAR0 |
2437               SHA_MR_HMAC |
2438               SHA_MR_DUALBUFF);
2439         mr |= ctx->flags & SHA_FLAGS_ALGO_MASK;
2440         atmel_sha_write(dd, SHA_MR, mr);
2441 
2442         msg_size = authctx->assoclen + authctx->textlen;
2443         atmel_sha_write(dd, SHA_MSR, msg_size);
2444         atmel_sha_write(dd, SHA_BCR, msg_size);
2445 
2446         atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
2447 
2448         /* Process assoc data. */
2449         return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen,
2450                                    true, false,
2451                                    atmel_sha_authenc_init_done);
2452 }
2453 
2454 static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd)
2455 {
2456         struct ahash_request *req = dd->req;
2457         struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2458 
2459         return authctx->cb(authctx->aes_dev, 0, dd->is_async);
2460 }
2461 
2462 int atmel_sha_authenc_final(struct ahash_request *req,
2463                             u32 *digest, unsigned int digestlen,
2464                             atmel_aes_authenc_fn_t cb,
2465                             struct atmel_aes_dev *aes_dev)
2466 {
2467         struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2468         struct atmel_sha_reqctx *ctx = &authctx->base;
2469         struct atmel_sha_dev *dd = ctx->dd;
2470 
2471         switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
2472         case SHA_FLAGS_SHA1:
2473                 authctx->digestlen = SHA1_DIGEST_SIZE;
2474                 break;
2475 
2476         case SHA_FLAGS_SHA224:
2477                 authctx->digestlen = SHA224_DIGEST_SIZE;
2478                 break;
2479 
2480         case SHA_FLAGS_SHA256:
2481                 authctx->digestlen = SHA256_DIGEST_SIZE;
2482                 break;
2483 
2484         case SHA_FLAGS_SHA384:
2485                 authctx->digestlen = SHA384_DIGEST_SIZE;
2486                 break;
2487 
2488         case SHA_FLAGS_SHA512:
2489                 authctx->digestlen = SHA512_DIGEST_SIZE;
2490                 break;
2491 
2492         default:
2493                 return atmel_sha_complete(dd, -EINVAL);
2494         }
2495         if (authctx->digestlen > digestlen)
2496                 authctx->digestlen = digestlen;
2497 
2498         authctx->cb = cb;
2499         authctx->aes_dev = aes_dev;
2500         authctx->digest = digest;
2501         return atmel_sha_wait_for_data_ready(dd,
2502                                              atmel_sha_authenc_final_done);
2503 }
2504 EXPORT_SYMBOL_GPL(atmel_sha_authenc_final);
2505 
2506 static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd)
2507 {
2508         struct ahash_request *req = dd->req;
2509         struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2510         size_t i, num_words = authctx->digestlen / sizeof(u32);
2511 
2512         for (i = 0; i < num_words; ++i)
2513                 authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i));
2514 
2515         return atmel_sha_complete(dd, 0);
2516 }
2517 
2518 void atmel_sha_authenc_abort(struct ahash_request *req)
2519 {
2520         struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
2521         struct atmel_sha_reqctx *ctx = &authctx->base;
2522         struct atmel_sha_dev *dd = ctx->dd;
2523 
2524         /* Prevent atmel_sha_complete() from calling req->base.complete(). */
2525         dd->is_async = false;
2526         dd->force_complete = false;
2527         (void)atmel_sha_complete(dd, 0);
2528 }
2529 EXPORT_SYMBOL_GPL(atmel_sha_authenc_abort);
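
/*
 * Expected call sequence from the atmel-aes driver, as visible from
 * this file: atmel_sha_authenc_spawn() once at tfm init time, then per
 * request atmel_sha_authenc_schedule() followed by
 * atmel_sha_authenc_init(), which hashes the associated data.  The text
 * then reaches the SHA IP through IDATAR0 (the mode programmed in
 * atmel_sha_authenc_init2()) while the AES side runs, and
 * atmel_sha_authenc_final() collects the tag, with
 * atmel_sha_authenc_abort() as the error exit.
 */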
2530 
2531 #endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */
2532 
2533 
2534 static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
2535 {
2536         int i;
2537 
2538         if (dd->caps.has_hmac)
2539                 for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++)
2540                         crypto_unregister_ahash(&sha_hmac_algs[i]);
2541 
2542         for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++)
2543                 crypto_unregister_ahash(&sha_1_256_algs[i]);
2544 
2545         if (dd->caps.has_sha224)
2546                 crypto_unregister_ahash(&sha_224_alg);
2547 
2548         if (dd->caps.has_sha_384_512) {
2549                 for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++)
2550                         crypto_unregister_ahash(&sha_384_512_algs[i]);
2551         }
2552 }
2553 
2554 static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
2555 {
2556         int err, i, j;
2557 
2558         for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) {
2559                 err = crypto_register_ahash(&sha_1_256_algs[i]);
2560                 if (err)
2561                         goto err_sha_1_256_algs;
2562         }
2563 
2564         if (dd->caps.has_sha224) {
2565                 err = crypto_register_ahash(&sha_224_alg);
2566                 if (err)
2567                         goto err_sha_224_algs;
2568         }
2569 
2570         if (dd->caps.has_sha_384_512) {
2571                 for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) {
2572                         err = crypto_register_ahash(&sha_384_512_algs[i]);
2573                         if (err)
2574                                 goto err_sha_384_512_algs;
2575                 }
2576         }
2577 
2578         if (dd->caps.has_hmac) {
2579                 for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) {
2580                         err = crypto_register_ahash(&sha_hmac_algs[i]);
2581                         if (err)
2582                                 goto err_sha_hmac_algs;
2583                 }
2584         }
2585 
2586         return 0;
2587 
2588         /*i = ARRAY_SIZE(sha_hmac_algs);*/
2589 err_sha_hmac_algs:
2590         for (j = 0; j < i; j++)
2591                 crypto_unregister_ahash(&sha_hmac_algs[j]);
2592         i = ARRAY_SIZE(sha_384_512_algs);
2593 err_sha_384_512_algs:
2594         for (j = 0; j < i; j++)
2595                 crypto_unregister_ahash(&sha_384_512_algs[j]);
2596         if (dd->caps.has_sha224)
                     crypto_unregister_ahash(&sha_224_alg);
2597 err_sha_224_algs:
2598         i = ARRAY_SIZE(sha_1_256_algs);
2599 err_sha_1_256_algs:
2600         for (j = 0; j < i; j++)
2601                 crypto_unregister_ahash(&sha_1_256_algs[j]);
2602 
2603         return err;
2604 }
2605 
2606 static bool atmel_sha_filter(struct dma_chan *chan, void *slave)
2607 {
2608         struct at_dma_slave     *sl = slave;
2609 
2610         if (sl && sl->dma_dev == chan->device->dev) {
2611                 chan->private = sl;
2612                 return true;
2613         } else {
2614                 return false;
2615         }
2616 }
2617 
2618 static int atmel_sha_dma_init(struct atmel_sha_dev *dd,
2619                                 struct crypto_platform_data *pdata)
2620 {
2621         dma_cap_mask_t mask_in;
2622 
2623         /* Try to grab DMA channel */
2624         dma_cap_zero(mask_in);
2625         dma_cap_set(DMA_SLAVE, mask_in);
2626 
2627         dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in,
2628                         atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx");
2629         if (!dd->dma_lch_in.chan) {
2630                 dev_warn(dd->dev, "no DMA channel available\n");
2631                 return -ENODEV;
2632         }
2633 
2634         dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
2635         dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
2636                 SHA_REG_DIN(0);
2637         dd->dma_lch_in.dma_conf.src_maxburst = 1;
2638         dd->dma_lch_in.dma_conf.src_addr_width =
2639                 DMA_SLAVE_BUSWIDTH_4_BYTES;
2640         dd->dma_lch_in.dma_conf.dst_maxburst = 1;
2641         dd->dma_lch_in.dma_conf.dst_addr_width =
2642                 DMA_SLAVE_BUSWIDTH_4_BYTES;
2643         dd->dma_lch_in.dma_conf.device_fc = false;
2644 
2645         return 0;
2646 }
2647 
2648 static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd)
2649 {
2650         dma_release_channel(dd->dma_lch_in.chan);
2651 }
2652 
2653 static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
2654 {
2656         dd->caps.has_dma = 0;
2657         dd->caps.has_dualbuff = 0;
2658         dd->caps.has_sha224 = 0;
2659         dd->caps.has_sha_384_512 = 0;
2660         dd->caps.has_uihv = 0;
2661         dd->caps.has_hmac = 0;
2662 
2663         /* keep only major version number */
2664         switch (dd->hw_version & 0xff0) {
2665         case 0x510:
2666                 dd->caps.has_dma = 1;
2667                 dd->caps.has_dualbuff = 1;
2668                 dd->caps.has_sha224 = 1;
2669                 dd->caps.has_sha_384_512 = 1;
2670                 dd->caps.has_uihv = 1;
2671                 dd->caps.has_hmac = 1;
2672                 break;
2673         case 0x420:
2674                 dd->caps.has_dma = 1;
2675                 dd->caps.has_dualbuff = 1;
2676                 dd->caps.has_sha224 = 1;
2677                 dd->caps.has_sha_384_512 = 1;
2678                 dd->caps.has_uihv = 1;
2679                 break;
2680         case 0x410:
2681                 dd->caps.has_dma = 1;
2682                 dd->caps.has_dualbuff = 1;
2683                 dd->caps.has_sha224 = 1;
2684                 dd->caps.has_sha_384_512 = 1;
2685                 break;
2686         case 0x400:
2687                 dd->caps.has_dma = 1;
2688                 dd->caps.has_dualbuff = 1;
2689                 dd->caps.has_sha224 = 1;
2690                 break;
2691         case 0x320:
2692                 break;
2693         default:
2694                 dev_warn(dd->dev,
2695                                 "Unmanaged sha version, set minimum capabilities\n");
2696                 break;
2697         }
2698 }
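
/*
 * Capability summary by IP major version, as set by the switch above:
 *
 *	version   dma  dualbuff  sha224  sha384/512  uihv  hmac
 *	0x510      y      y        y         y        y     y
 *	0x420      y      y        y         y        y     -
 *	0x410      y      y        y         y        -     -
 *	0x400      y      y        y         -        -     -
 *	0x320      -      -        -         -        -     -
 */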
2699 
2700 #if defined(CONFIG_OF)
2701 static const struct of_device_id atmel_sha_dt_ids[] = {
2702         { .compatible = "atmel,at91sam9g46-sha" },
2703         { /* sentinel */ }
2704 };
2705 
2706 MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
2707 
2708 static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev)
2709 {
2710         struct device_node *np = pdev->dev.of_node;
2711         struct crypto_platform_data *pdata;
2712 
2713         if (!np) {
2714                 dev_err(&pdev->dev, "device node not found\n");
2715                 return ERR_PTR(-EINVAL);
2716         }
2717 
2718         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2719         if (!pdata)
2720                 return ERR_PTR(-ENOMEM);
2721 
2722         pdata->dma_slave = devm_kzalloc(&pdev->dev,
2723                                         sizeof(*(pdata->dma_slave)),
2724                                         GFP_KERNEL);
2725         if (!pdata->dma_slave)
2726                 return ERR_PTR(-ENOMEM);
2727 
2728         return pdata;
2729 }
2730 #else /* CONFIG_OF */
2731 static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev)
2732 {
2733         return ERR_PTR(-EINVAL);
2734 }
2735 #endif
2736 
2737 static int atmel_sha_probe(struct platform_device *pdev)
2738 {
2739         struct atmel_sha_dev *sha_dd;
2740         struct crypto_platform_data     *pdata;
2741         struct device *dev = &pdev->dev;
2742         struct resource *sha_res;
2743         int err;
2744 
2745         sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
2746         if (sha_dd == NULL) {
2747                 err = -ENOMEM;
2748                 goto sha_dd_err;
2749         }
2750 
2751         sha_dd->dev = dev;
2752 
2753         platform_set_drvdata(pdev, sha_dd);
2754 
2755         INIT_LIST_HEAD(&sha_dd->list);
2756         spin_lock_init(&sha_dd->lock);
2757 
2758         tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
2759                                         (unsigned long)sha_dd);
2760         tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
2761                                         (unsigned long)sha_dd);
2762 
2763         crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
2764 
2765         /* Get the base address */
2766         sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2767         if (!sha_res) {
2768                 dev_err(dev, "no MEM resource info\n");
2769                 err = -ENODEV;
2770                 goto res_err;
2771         }
2772         sha_dd->phys_base = sha_res->start;
2773 
2774         /* Get the IRQ */
2775         sha_dd->irq = platform_get_irq(pdev, 0);
2776         if (sha_dd->irq < 0) {
2777                 err = sha_dd->irq;
2778                 goto res_err;
2779         }
2780 
2781         err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
2782                                IRQF_SHARED, "atmel-sha", sha_dd);
2783         if (err) {
2784                 dev_err(dev, "unable to request sha irq.\n");
2785                 goto res_err;
2786         }
2787 
2788         /* Initializing the clock */
2789         sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
2790         if (IS_ERR(sha_dd->iclk)) {
2791                 dev_err(dev, "clock initialization failed.\n");
2792                 err = PTR_ERR(sha_dd->iclk);
2793                 goto res_err;
2794         }
2795 
2796         sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
2797         if (IS_ERR(sha_dd->io_base)) {
2798                 dev_err(dev, "can't ioremap\n");
2799                 err = PTR_ERR(sha_dd->io_base);
2800                 goto res_err;
2801         }
2802 
2803         err = clk_prepare(sha_dd->iclk);
2804         if (err)
2805                 goto res_err;
2806 
2807         atmel_sha_hw_version_init(sha_dd);
2808 
2809         atmel_sha_get_cap(sha_dd);
2810 
2811         if (sha_dd->caps.has_dma) {
2812                 pdata = pdev->dev.platform_data;
2813                 if (!pdata) {
2814                         pdata = atmel_sha_of_init(pdev);
2815                         if (IS_ERR(pdata)) {
2816                                 dev_err(&pdev->dev, "platform data not available\n");
2817                                 err = PTR_ERR(pdata);
2818                                 goto iclk_unprepare;
2819                         }
2820                 }
2821                 if (!pdata->dma_slave) {
2822                         err = -ENXIO;
2823                         goto iclk_unprepare;
2824                 }
2825                 err = atmel_sha_dma_init(sha_dd, pdata);
2826                 if (err)
2827                         goto err_sha_dma;
2828 
2829                 dev_info(dev, "using %s for DMA transfers\n",
2830                                 dma_chan_name(sha_dd->dma_lch_in.chan));
2831         }
2832 
2833         spin_lock(&atmel_sha.lock);
2834         list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
2835         spin_unlock(&atmel_sha.lock);
2836 
2837         err = atmel_sha_register_algs(sha_dd);
2838         if (err)
2839                 goto err_algs;
2840 
2841         dev_info(dev, "Atmel SHA1/SHA256%s%s\n",
2842                         sha_dd->caps.has_sha224 ? "/SHA224" : "",
2843                         sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : "");
2844 
2845         return 0;
2846 
2847 err_algs:
2848         spin_lock(&atmel_sha.lock);
2849         list_del(&sha_dd->list);
2850         spin_unlock(&atmel_sha.lock);
2851         if (sha_dd->caps.has_dma)
2852                 atmel_sha_dma_cleanup(sha_dd);
2853 err_sha_dma:
2854 iclk_unprepare:
2855         clk_unprepare(sha_dd->iclk);
2856 res_err:
2857         tasklet_kill(&sha_dd->queue_task);
2858         tasklet_kill(&sha_dd->done_task);
2859 sha_dd_err:
2860         dev_err(dev, "initialization failed.\n");
2861 
2862         return err;
2863 }
2864 
2865 static int atmel_sha_remove(struct platform_device *pdev)
2866 {
2867         struct atmel_sha_dev *sha_dd;
2868 
2869         sha_dd = platform_get_drvdata(pdev);
2870         if (!sha_dd)
2871                 return -ENODEV;
2872         spin_lock(&atmel_sha.lock);
2873         list_del(&sha_dd->list);
2874         spin_unlock(&atmel_sha.lock);
2875 
2876         atmel_sha_unregister_algs(sha_dd);
2877 
2878         tasklet_kill(&sha_dd->queue_task);
2879         tasklet_kill(&sha_dd->done_task);
2880 
2881         if (sha_dd->caps.has_dma)
2882                 atmel_sha_dma_cleanup(sha_dd);
2883 
2884         clk_unprepare(sha_dd->iclk);
2885 
2886         return 0;
2887 }
2888 
2889 static struct platform_driver atmel_sha_driver = {
2890         .probe          = atmel_sha_probe,
2891         .remove         = atmel_sha_remove,
2892         .driver         = {
2893                 .name   = "atmel_sha",
2894                 .of_match_table = of_match_ptr(atmel_sha_dt_ids),
2895         },
2896 };
2897 
2898 module_platform_driver(atmel_sha_driver);
2899 
2900 MODULE_DESCRIPTION("Atmel SHA (1/256/224/384/512) hw acceleration support.");
2901 MODULE_LICENSE("GPL v2");
2902 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");

/* [<][>][^][v][top][bottom][index][help] */