/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);

/* For any tfm, requests for that tfm must be returned in the order
 * received.  With multiple queues available, the CCP can process more
 * than one cmd at a time.  Therefore we must maintain a cmd list to ensure
 * the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
	struct list_head cmds;		/* All queued/in-progress cmds */
	struct list_head *backlog;	/* First backlogged cmd, &cmds if none */
	unsigned int cmd_count;		/* Number of cmds on the list */
};

#define CCP_CRYPTO_MAX_QLEN	100

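/* All access to req_queue is serialized by req_queue_lock */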
static struct ccp_crypto_queue req_queue;
static DEFINE_SPINLOCK(req_queue_lock);

struct ccp_crypto_cmd {
	struct list_head entry;

	struct ccp_cmd *cmd;

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;
};

/* A cmd counts as successfully submitted if the CCP accepted it,
 * i.e. it is in progress or was backlogged.
 */
static inline bool ccp_crypto_success(int err)
{
	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
		return false;

	return true;
}

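/* Remove a completed cmd from the queue.  Returns the next queued cmd
 * that uses the same tfm (now eligible for submission) and, via @backlog,
 * the next backlogged cmd that should be notified that it is now in
 * progress.
 */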
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cmd *held = NULL, *tmp;
	unsigned long flags;

	*backlog = NULL;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Held cmds will be after the current cmd in the queue, so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 *   Because cmds can be executed from any point in the cmd list,
	 *   special precautions have to be taken when handling the backlog.
	 */
	if (req_queue.backlog != &req_queue.cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;

		*backlog = container_of(req_queue.backlog,
					struct ccp_crypto_cmd, entry);
		req_queue.backlog = req_queue.backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	req_queue.cmd_count--;
	list_del(&crypto_cmd->entry);

	spin_unlock_irqrestore(&req_queue_lock, flags);

	return held;
}

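/* Callback invoked by the CCP driver when a cmd completes.  Updates the
 * queue, runs the crypto API completion callbacks and submits the next
 * cmd (if any) that was held waiting on the same tfm.
 */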
static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
	int ret;

	if (err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			req->complete(req, -EINPROGRESS);
		}

		return;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		backlog->req->complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		req->complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	req->complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		/* Since we have already queued the cmd, we must indicate that
		 * we can backlog so as not to "lose" this request.
		 */
		held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		ctx = crypto_tfm_ctx(held->req->tfm);
		if (ctx->complete)
			ret = ctx->complete(held->req, ret);
		held->req->complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			backlog->req->complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);
		held = next;
	}

	kfree(crypto_cmd);
}

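/* Add a cmd to the queue, submitting it to the CCP immediately only if
 * no other cmd for the same tfm is pending; otherwise it is held until
 * the active cmd for that tfm completes.
 */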
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
	struct ccp_crypto_cmd *active = NULL, *tmp;
	unsigned long flags;
	bool free_cmd = true;
	int ret;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Check if the cmd can/should be queued */
	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
			goto e_lock;
	}

	/* Look for an entry with the same tfm.  If there is a cmd
	 * with the same tfm in the list, then the current cmd cannot
	 * be submitted to the CCP yet.
	 */
	list_for_each_entry(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		active = tmp;
		break;
	}

	ret = -EINPROGRESS;
	if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
		if (!ccp_crypto_success(ret))
			goto e_lock;	/* Error, don't queue it */
		if ((ret == -EBUSY) &&
		    !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
			goto e_lock;	/* Not backlogging, don't queue it */
	}

	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (req_queue.backlog == &req_queue.cmds)
			req_queue.backlog = &crypto_cmd->entry;
	}
	crypto_cmd->ret = ret;

	req_queue.cmd_count++;
	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

	free_cmd = false;

e_lock:
	spin_unlock_irqrestore(&req_queue_lock, flags);

	if (free_cmd)
		kfree(crypto_cmd);

	return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 *
 * Return: -EINPROGRESS if the request was queued, -EBUSY if the request
 * was backlogged or the queue was full, or a negative error code on failure.
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd)
{
	struct ccp_crypto_cmd *crypto_cmd;
	gfp_t gfp;

	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
	if (!crypto_cmd)
		return -ENOMEM;

	/* The tfm pointer must be saved and not referenced from the
	 * crypto_async_request (req) pointer because it is used after
	 * the completion callback for the request, when the req pointer
	 * might no longer be valid.
	 */
	crypto_cmd->cmd = cmd;
	crypto_cmd->req = req;
	crypto_cmd->tfm = req->tfm;

	cmd->callback = ccp_crypto_complete;
	cmd->data = crypto_cmd;

	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		cmd->flags |= CCP_CMD_MAY_BACKLOG;
	else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

	return ccp_crypto_enqueue_cmd(crypto_cmd);
}

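/**
 * ccp_crypto_sg_table_add - append a scatterlist to an sg_table
 *
 * @table: sg_table with enough unused entries to hold @sg_add
 * @sg_add: scatterlist whose entries are copied into @table
 *
 * Return: the last table entry that was filled in.
 */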
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
					    struct scatterlist *sg_add)
{
	struct scatterlist *sg, *sg_last = NULL;

	/* Find the first unused entry in the table */
	for (sg = table->sgl; sg; sg = sg_next(sg))
		if (!sg_page(sg))
			break;
	BUG_ON(!sg);

	for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
		sg_set_page(sg, sg_page(sg_add), sg_add->length,
			    sg_add->offset);
		sg_last = sg;
	}
	BUG_ON(sg_add);

	return sg_last;
}

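/* Register the CCP algorithm implementations with the crypto API,
 * honoring the aes_disable and sha_disable module parameters.
 */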
static int ccp_register_algs(void)
{
	int ret;

	if (!aes_disable) {
		ret = ccp_register_aes_algs(&cipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_cmac_algs(&hash_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_xts_algs(&cipher_algs);
		if (ret)
			return ret;
	}

	if (!sha_disable) {
		ret = ccp_register_sha_algs(&hash_algs);
		if (ret)
			return ret;
	}

	return 0;
}

static void ccp_unregister_algs(void)
{
	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
	struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;

	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
		crypto_unregister_ahash(&ahash_alg->alg);
		list_del(&ahash_alg->entry);
		kfree(ahash_alg);
	}

	list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
		crypto_unregister_alg(&ablk_alg->alg);
		list_del(&ablk_alg->entry);
		kfree(ablk_alg);
	}
}

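/* Module init: fail if no CCP device is present, otherwise set up the
 * request queue and register the supported algorithms.
 */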
static int ccp_crypto_init(void)
{
	int ret;

	ret = ccp_present();
	if (ret)
		return ret;

	INIT_LIST_HEAD(&req_queue.cmds);
	req_queue.backlog = &req_queue.cmds;
	req_queue.cmd_count = 0;

	ret = ccp_register_algs();
	if (ret)
		ccp_unregister_algs();

	return ret;
}

static void ccp_crypto_exit(void)
{
	ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);