1 diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
2 index dffb855..3e11215 100644
3 --- a/drivers/crypto/Kconfig
4 +++ b/drivers/crypto/Kconfig
5 @@ -286,6 +286,16 @@ config CRYPTO_DEV_SAHARA
6 This option enables support for the SAHARA HW crypto accelerator
7 found in some Freescale i.MX chips.
10 + tristate "Support for the DCP engine"
11 + depends on ARCH_MXS && OF
12 + select CRYPTO_BLKCIPHER
16 + This option enables support for the hardware crypto-acceleration
17 + capabilities of the DCP co-processor
20 tristate "Support for Samsung S5PV210 crypto accelerator"
21 depends on ARCH_S5PV210
22 diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
23 index 38ce13d..b4946dd 100644
24 --- a/drivers/crypto/Makefile
25 +++ b/drivers/crypto/Makefile
26 @@ -13,6 +13,7 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
27 obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
28 obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
29 obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
30 +obj-$(CONFIG_CRYPTO_DEV_DCP) += dcp.o
31 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
32 obj-$(CONFIG_CRYPTO_DEV_TEGRA_AES) += tegra-aes.o
33 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
34 diff --git a/drivers/crypto/dcp.c b/drivers/crypto/dcp.c
36 index 0000000..eea194c
38 +++ b/drivers/crypto/dcp.c
41 + * Cryptographic API.
43 + * Support for DCP cryptographic accelerator.
45 + * Copyright (c) 2013
46 + * Author: Tobias Rauter <tobias.rau...@gmail.com>
48 + * This program is free software; you can redistribute it and/or modify
49 + * it under the terms of the GNU General Public License version 2 as published
50 + * by the Free Software Foundation.
52 + * Based on tegra-aes.c, dcp.c (from freescale SDK) and sahara.c
54 +#include <linux/module.h>
55 +#include <linux/init.h>
56 +#include <linux/errno.h>
57 +#include <linux/kernel.h>
58 +#include <linux/platform_device.h>
59 +#include <linux/dma-mapping.h>
60 +#include <linux/io.h>
61 +#include <linux/mutex.h>
62 +#include <linux/interrupt.h>
63 +#include <linux/completion.h>
64 +#include <linux/workqueue.h>
65 +#include <linux/delay.h>
66 +#include <linux/crypto.h>
67 +#include <linux/miscdevice.h>
69 +#include <crypto/scatterwalk.h>
70 +#include <crypto/aes.h>
73 +/* IOCTL for DCP OTP Key AES - taken from Freescale's SDK */
74 +#define DBS_IOCTL_BASE 'd'
75 +#define DBS_ENC _IOW(DBS_IOCTL_BASE, 0x00, uint8_t[16])
76 +#define DBS_DEC _IOW(DBS_IOCTL_BASE, 0x01, uint8_t[16])
78 +/* DCP channel used for AES */
79 +#define USED_CHANNEL 1
80 +/* Ring Buffers' maximum size */
81 +#define DCP_MAX_PKG 20
83 +/* Control Register */
84 +#define DCP_REG_CTRL 0x000
85 +#define DCP_CTRL_SFRST (1<<31)
86 +#define DCP_CTRL_CLKGATE (1<<30)
87 +#define DCP_CTRL_CRYPTO_PRESENT (1<<29)
88 +#define DCP_CTRL_SHA_PRESENT (1<<28)
89 +#define DCP_CTRL_GATHER_RES_WRITE (1<<23)
90 +#define DCP_CTRL_ENABLE_CONTEXT_CACHE (1<<22)
91 +#define DCP_CTRL_ENABLE_CONTEXT_SWITCH (1<<21)
92 +#define DCP_CTRL_CH_IRQ_E_0 0x01
93 +#define DCP_CTRL_CH_IRQ_E_1 0x02
94 +#define DCP_CTRL_CH_IRQ_E_2 0x04
95 +#define DCP_CTRL_CH_IRQ_E_3 0x08
97 +/* Status register */
98 +#define DCP_REG_STAT 0x010
99 +#define DCP_STAT_OTP_KEY_READY (1<<28)
100 +#define DCP_STAT_CUR_CHANNEL(stat) ((stat>>24)&0x0F)
101 +#define DCP_STAT_READY_CHANNEL(stat) ((stat>>16)&0x0F)
102 +#define DCP_STAT_IRQ(stat) (stat&0x0F)
103 +#define DCP_STAT_CHAN_0 (0x01)
104 +#define DCP_STAT_CHAN_1 (0x02)
105 +#define DCP_STAT_CHAN_2 (0x04)
106 +#define DCP_STAT_CHAN_3 (0x08)
108 +/* Channel Control Register */
109 +#define DCP_REG_CHAN_CTRL 0x020
110 +#define DCP_CHAN_CTRL_CH0_IRQ_MERGED (1<<16)
111 +#define DCP_CHAN_CTRL_HIGH_PRIO_0 (0x0100)
112 +#define DCP_CHAN_CTRL_HIGH_PRIO_1 (0x0200)
113 +#define DCP_CHAN_CTRL_HIGH_PRIO_2 (0x0400)
114 +#define DCP_CHAN_CTRL_HIGH_PRIO_3 (0x0800)
115 +#define DCP_CHAN_CTRL_ENABLE_0 (0x01)
116 +#define DCP_CHAN_CTRL_ENABLE_1 (0x02)
117 +#define DCP_CHAN_CTRL_ENABLE_2 (0x04)
118 +#define DCP_CHAN_CTRL_ENABLE_3 (0x08)
121 + * Channel Registers:
122 + * The DCP has 4 channels. Each of these channels
123 + * has 4 registers (command pointer, semaphore, status and options).
124 + * The address of register REG of channel CHAN is obtained by
125 + * dcp_chan_reg(REG, CHAN)
127 +#define DCP_REG_CHAN_PTR 0x00000100
128 +#define DCP_REG_CHAN_SEMA 0x00000110
129 +#define DCP_REG_CHAN_STAT 0x00000120
130 +#define DCP_REG_CHAN_OPT 0x00000130
132 +#define DCP_CHAN_STAT_NEXT_CHAIN_IS_0 0x010000
133 +#define DCP_CHAN_STAT_NO_CHAIN 0x020000
134 +#define DCP_CHAN_STAT_CONTEXT_ERROR 0x030000
135 +#define DCP_CHAN_STAT_PAYLOAD_ERROR 0x040000
136 +#define DCP_CHAN_STAT_INVALID_MODE 0x050000
137 +#define DCP_CHAN_STAT_PAGEFAULT 0x40
138 +#define DCP_CHAN_STAT_DST 0x20
139 +#define DCP_CHAN_STAT_SRC 0x10
140 +#define DCP_CHAN_STAT_PACKET 0x08
141 +#define DCP_CHAN_STAT_SETUP 0x04
142 +#define DCP_CHAN_STAT_MISMATCH 0x02
144 +/* hw packet control */
146 +#define DCP_PKT_PAYLOAD_KEY (1<<11)
147 +#define DCP_PKT_OTP_KEY (1<<10)
148 +#define DCP_PKT_CIPHER_INIT (1<<9)
149 +#define DCP_PKG_CIPHER_ENCRYPT (1<<8)
150 +#define DCP_PKT_CIPHER_ENABLE (1<<5)
151 +#define DCP_PKT_DECR_SEM (1<<1)
152 +#define DCP_PKT_CHAIN (1<<2)
153 +#define DCP_PKT_IRQ 1
155 +#define DCP_PKT_MODE_CBC (1<<4)
156 +#define DCP_PKT_KEYSELECT_OTP (0xFF<<8)
159 +#define DCP_ENC 0x0001
160 +#define DCP_DEC 0x0002
161 +#define DCP_ECB 0x0004
162 +#define DCP_CBC 0x0008
163 +#define DCP_CBC_INIT 0x0010
164 +#define DCP_NEW_KEY 0x0040
165 +#define DCP_OTP_KEY 0x0080
166 +#define DCP_AES 0x1000
169 +#define DCP_FLAG_BUSY 0x01
170 +#define DCP_FLAG_PRODUCING 0x02
176 +struct dcp_dev_req_ctx {
181 + unsigned int flags;
182 + u8 key[AES_KEYSIZE_128];
185 + struct ablkcipher_request *req;
186 + struct crypto_ablkcipher *fallback;
191 + struct ablkcipher_walk walk;
195 + struct device *dev;
196 + void __iomem *dcp_regs_base;
201 + spinlock_t queue_lock;
202 + struct crypto_queue queue;
204 + uint32_t pkt_produced;
205 + uint32_t pkt_consumed;
207 + struct dcp_hw_packet *hw_pkg[DCP_MAX_PKG];
208 + dma_addr_t hw_phys_pkg;
210 + /* [KEY][IV] Both with 16 Bytes */
212 + dma_addr_t payload_base_dma;
215 + struct tasklet_struct done_task;
216 + struct tasklet_struct queue_task;
217 + struct timer_list watchdog;
219 + unsigned long flags;
221 + struct dcp_op *ctx;
223 + struct miscdevice dcp_bootstream_misc;
226 +struct dcp_hw_packet {
237 +struct dcp_dev *global_dev;
239 +static inline u32 dcp_chan_reg(u32 reg, int chan)
241 + return reg + (chan) * 0x40;
244 +static inline void dcp_write(struct dcp_dev *dev, u32 data, u32 reg)
246 + writel(data, dev->dcp_regs_base + reg);
249 +static inline void dcp_set(struct dcp_dev *dev, u32 data, u32 reg)
251 + writel(data, dev->dcp_regs_base + (reg | 0x04));
254 +static inline void dcp_clear(struct dcp_dev *dev, u32 data, u32 reg)
256 + writel(data, dev->dcp_regs_base + (reg | 0x08));
259 +static inline void dcp_toggle(struct dcp_dev *dev, u32 data, u32 reg)
261 + writel(data, dev->dcp_regs_base + (reg | 0x0C));
264 +static inline unsigned int dcp_read(struct dcp_dev *dev, u32 reg)
266 + return readl(dev->dcp_regs_base + reg);
269 +void dcp_dma_unmap(struct dcp_dev *dev, struct dcp_hw_packet *pkt)
271 + dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
272 + dma_unmap_page(dev->dev, pkt->dst, pkt->size, DMA_FROM_DEVICE);
273 + dev_dbg(dev->dev, "unmap packet %x", (unsigned int) pkt);
276 +int dcp_dma_map(struct dcp_dev *dev,
277 + struct ablkcipher_walk *walk, struct dcp_hw_packet *pkt)
279 + dev_dbg(dev->dev, "map packet %x", (unsigned int) pkt);
280 + /* align to length = 16 */
281 + pkt->size = walk->nbytes - (walk->nbytes % 16);
283 + pkt->src = dma_map_page(dev->dev, walk->src.page, walk->src.offset,
284 + pkt->size, DMA_TO_DEVICE);
286 + if (pkt->src == 0) {
287 + dev_err(dev->dev, "Unable to map src");
291 + pkt->dst = dma_map_page(dev->dev, walk->dst.page, walk->dst.offset,
292 + pkt->size, DMA_FROM_DEVICE);
294 + if (pkt->dst == 0) {
295 + dev_err(dev->dev, "Unable to map dst");
296 + dma_unmap_page(dev->dev, pkt->src, pkt->size, DMA_TO_DEVICE);
303 +static void dcp_op_one(struct dcp_dev *dev, struct dcp_hw_packet *pkt,
306 + struct dcp_op *ctx = dev->ctx;
307 + pkt->pkt1 = ctx->pkt1;
308 + pkt->pkt2 = ctx->pkt2;
310 + pkt->payload = (u32) dev->payload_base_dma;
313 + if (ctx->flags & DCP_CBC_INIT) {
314 + pkt->pkt1 |= DCP_PKT_CIPHER_INIT;
315 + ctx->flags &= ~DCP_CBC_INIT;
318 + mod_timer(&dev->watchdog, jiffies + msecs_to_jiffies(500));
319 + pkt->pkt1 |= DCP_PKT_IRQ;
321 + pkt->pkt1 |= DCP_PKT_CHAIN;
323 + dev->pkt_produced++;
326 + dcp_chan_reg(DCP_REG_CHAN_SEMA, USED_CHANNEL));
329 +static void dcp_op_proceed(struct dcp_dev *dev)
331 + struct dcp_op *ctx = dev->ctx;
332 + struct dcp_hw_packet *pkt;
334 + while (ctx->walk.nbytes) {
337 + pkt = dev->hw_pkg[dev->pkt_produced % DCP_MAX_PKG];
338 + err = dcp_dma_map(dev, &ctx->walk, pkt);
340 + dev->ctx->stat |= err;
341 + /* start timer to wait for already set up calls */
342 + mod_timer(&dev->watchdog,
343 + jiffies + msecs_to_jiffies(500));
348 + err = ctx->walk.nbytes - pkt->size;
349 + ablkcipher_walk_done(dev->ctx->req, &dev->ctx->walk, err);
351 + dcp_op_one(dev, pkt, ctx->walk.nbytes == 0);
352 + /* we have to wait if no space is left in buffer */
353 + if (dev->pkt_produced - dev->pkt_consumed == DCP_MAX_PKG)
356 + clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
359 +static void dcp_op_start(struct dcp_dev *dev, uint8_t use_walk)
361 + struct dcp_op *ctx = dev->ctx;
363 + if (ctx->flags & DCP_NEW_KEY) {
364 + memcpy(dev->payload_base, ctx->key, ctx->keylen);
365 + ctx->flags &= ~DCP_NEW_KEY;
369 + ctx->pkt1 |= DCP_PKT_CIPHER_ENABLE;
370 + ctx->pkt1 |= DCP_PKT_DECR_SEM;
372 + if (ctx->flags & DCP_OTP_KEY)
373 + ctx->pkt1 |= DCP_PKT_OTP_KEY;
375 + ctx->pkt1 |= DCP_PKT_PAYLOAD_KEY;
377 + if (ctx->flags & DCP_ENC)
378 + ctx->pkt1 |= DCP_PKG_CIPHER_ENCRYPT;
381 + if (ctx->flags & DCP_CBC)
382 + ctx->pkt2 |= DCP_PKT_MODE_CBC;
384 + dev->pkt_produced = 0;
385 + dev->pkt_consumed = 0;
388 + dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
389 + dcp_write(dev, (u32) dev->hw_phys_pkg,
390 + dcp_chan_reg(DCP_REG_CHAN_PTR, USED_CHANNEL));
392 + set_bit(DCP_FLAG_PRODUCING, &dev->flags);
395 + ablkcipher_walk_init(&ctx->walk, ctx->req->dst,
396 + ctx->req->src, ctx->req->nbytes);
397 + ablkcipher_walk_phys(ctx->req, &ctx->walk);
398 + dcp_op_proceed(dev);
400 + dcp_op_one(dev, dev->hw_pkg[0], 1);
401 + clear_bit(DCP_FLAG_PRODUCING, &dev->flags);
405 +static void dcp_done_task(unsigned long data)
407 + struct dcp_dev *dev = (struct dcp_dev *)data;
408 + struct dcp_hw_packet *last_packet;
412 + for (last_packet = dev->hw_pkg[(dev->pkt_consumed) % DCP_MAX_PKG];
413 + last_packet->stat == 1;
415 + dev->hw_pkg[++(dev->pkt_consumed) % DCP_MAX_PKG]) {
417 + dcp_dma_unmap(dev, last_packet);
418 + last_packet->stat = 0;
421 + /* the last call of this function already consumed this IRQ's packet */
426 + "Packet(s) done with status %x; finished: %d, produced:%d, complete consumed: %d",
427 + dev->ctx->stat, fin, dev->pkt_produced, dev->pkt_consumed);
429 + last_packet = dev->hw_pkg[(dev->pkt_consumed - 1) % DCP_MAX_PKG];
430 + if (!dev->ctx->stat && last_packet->pkt1 & DCP_PKT_CHAIN) {
431 + if (!test_and_set_bit(DCP_FLAG_PRODUCING, &dev->flags))
432 + dcp_op_proceed(dev);
436 + while (unlikely(dev->pkt_consumed < dev->pkt_produced)) {
438 + dev->hw_pkg[dev->pkt_consumed++ % DCP_MAX_PKG]);
441 + if (dev->ctx->flags & DCP_OTP_KEY) {
442 + /* we used the miscdevice, no walk to finish */
443 + clear_bit(DCP_FLAG_BUSY, &dev->flags);
447 + ablkcipher_walk_complete(&dev->ctx->walk);
448 + dev->ctx->req->base.complete(&dev->ctx->req->base,
451 + /* in case there are other requests in the queue */
452 + tasklet_schedule(&dev->queue_task);
455 +void dcp_watchdog(unsigned long data)
457 + struct dcp_dev *dev = (struct dcp_dev *)data;
458 + dev->ctx->stat |= dcp_read(dev,
459 + dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
461 + dev_err(dev->dev, "Timeout, Channel status: %x", dev->ctx->stat);
463 + if (!dev->ctx->stat)
464 + dev->ctx->stat = -ETIMEDOUT;
466 + dcp_done_task(data);
470 +static irqreturn_t dcp_common_irq(int irq, void *context)
473 + struct dcp_dev *dev = (struct dcp_dev *) context;
475 + del_timer(&dev->watchdog);
477 + msk = DCP_STAT_IRQ(dcp_read(dev, DCP_REG_STAT));
478 + dcp_clear(dev, msk, DCP_REG_STAT);
482 + dev->ctx->stat |= dcp_read(dev,
483 + dcp_chan_reg(DCP_REG_CHAN_STAT, USED_CHANNEL));
485 + if (msk & DCP_STAT_CHAN_1)
486 + tasklet_schedule(&dev->done_task);
488 + return IRQ_HANDLED;
491 +static irqreturn_t dcp_vmi_irq(int irq, void *context)
493 + return dcp_common_irq(irq, context);
496 +static irqreturn_t dcp_irq(int irq, void *context)
498 + return dcp_common_irq(irq, context);
501 +static void dcp_crypt(struct dcp_dev *dev, struct dcp_op *ctx)
505 + if ((ctx->flags & DCP_CBC) && ctx->req->info) {
506 + ctx->flags |= DCP_CBC_INIT;
507 + memcpy(dev->payload_base + AES_KEYSIZE_128,
508 + ctx->req->info, AES_KEYSIZE_128);
511 + dcp_op_start(dev, 1);
514 +static void dcp_queue_task(unsigned long data)
516 + struct dcp_dev *dev = (struct dcp_dev *) data;
517 + struct crypto_async_request *async_req, *backlog;
518 + struct crypto_ablkcipher *tfm;
519 + struct dcp_op *ctx;
520 + struct dcp_dev_req_ctx *rctx;
521 + struct ablkcipher_request *req;
522 + unsigned long flags;
524 + spin_lock_irqsave(&dev->queue_lock, flags);
526 + backlog = crypto_get_backlog(&dev->queue);
527 + async_req = crypto_dequeue_request(&dev->queue);
529 + spin_unlock_irqrestore(&dev->queue_lock, flags);
532 + goto ret_nothing_done;
535 + backlog->complete(backlog, -EINPROGRESS);
537 + req = ablkcipher_request_cast(async_req);
538 + tfm = crypto_ablkcipher_reqtfm(req);
539 + rctx = ablkcipher_request_ctx(req);
540 + ctx = crypto_ablkcipher_ctx(tfm);
542 + if (!req->src || !req->dst)
543 + goto ret_nothing_done;
545 + ctx->flags |= rctx->mode;
548 + dcp_crypt(dev, ctx);
553 + clear_bit(DCP_FLAG_BUSY, &dev->flags);
557 +static int dcp_cra_init(struct crypto_tfm *tfm)
559 + const char *name = tfm->__crt_alg->cra_name;
560 + struct dcp_op *ctx = crypto_tfm_ctx(tfm);
562 + tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_dev_req_ctx);
564 + ctx->fallback = crypto_alloc_ablkcipher(name, 0,
565 + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
567 + if (IS_ERR(ctx->fallback)) {
568 + dev_err(global_dev->dev, "Error allocating fallback algo %s\n",
570 + return PTR_ERR(ctx->fallback);
576 +static void dcp_cra_exit(struct crypto_tfm *tfm)
578 + struct dcp_op *ctx = crypto_tfm_ctx(tfm);
581 + crypto_free_ablkcipher(ctx->fallback);
583 + ctx->fallback = NULL;
586 +/* async interface */
587 +static int dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
590 + struct dcp_op *ctx = crypto_ablkcipher_ctx(tfm);
591 + unsigned int ret = 0;
594 + if (len == AES_KEYSIZE_128) {
595 + if (memcmp(ctx->key, key, AES_KEYSIZE_128)) {
596 + memcpy(ctx->key, key, len);
597 + ctx->flags |= DCP_NEW_KEY;
602 + ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
603 + ctx->fallback->base.crt_flags |=
604 + (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
606 + ret = crypto_ablkcipher_setkey(ctx->fallback, key, len);
608 + struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
610 + tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
611 + tfm_aux->crt_flags |=
612 + (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
617 +static int dcp_aes_cbc_crypt(struct ablkcipher_request *req, int mode)
619 + struct dcp_dev_req_ctx *rctx = ablkcipher_request_ctx(req);
620 + struct dcp_dev *dev = global_dev;
621 + unsigned long flags;
624 + if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
629 + spin_lock_irqsave(&dev->queue_lock, flags);
630 + err = ablkcipher_enqueue_request(&dev->queue, req);
631 + spin_unlock_irqrestore(&dev->queue_lock, flags);
633 + flags = test_and_set_bit(DCP_FLAG_BUSY, &dev->flags);
635 + if (!(flags & DCP_FLAG_BUSY))
636 + tasklet_schedule(&dev->queue_task);
641 +static int dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
643 + struct crypto_tfm *tfm =
644 + crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
645 + struct dcp_op *ctx = crypto_ablkcipher_ctx(
646 + crypto_ablkcipher_reqtfm(req));
648 + if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
650 + ablkcipher_request_set_tfm(req, ctx->fallback);
651 + err = crypto_ablkcipher_encrypt(req);
652 + ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
656 + return dcp_aes_cbc_crypt(req, DCP_AES | DCP_ENC | DCP_CBC);
659 +static int dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
661 + struct crypto_tfm *tfm =
662 + crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
663 + struct dcp_op *ctx = crypto_ablkcipher_ctx(
664 + crypto_ablkcipher_reqtfm(req));
666 + if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
668 + ablkcipher_request_set_tfm(req, ctx->fallback);
669 + err = crypto_ablkcipher_decrypt(req);
670 + ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
673 + return dcp_aes_cbc_crypt(req, DCP_AES | DCP_DEC | DCP_CBC);
676 +static struct crypto_alg algs[] = {
678 + .cra_name = "cbc(aes)",
679 + .cra_driver_name = "dcp-cbc-aes",
680 + .cra_alignmask = 3,
681 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
682 + CRYPTO_ALG_NEED_FALLBACK,
683 + .cra_blocksize = AES_KEYSIZE_128,
684 + .cra_type = &crypto_ablkcipher_type,
685 + .cra_priority = 300,
686 + .cra_u.ablkcipher = {
687 + .min_keysize = AES_KEYSIZE_128,
688 + .max_keysize = AES_KEYSIZE_128,
689 + .setkey = dcp_aes_setkey,
690 + .encrypt = dcp_aes_cbc_encrypt,
691 + .decrypt = dcp_aes_cbc_decrypt,
692 + .ivsize = AES_KEYSIZE_128,
698 +/* DCP bootstream verification interface: uses OTP key for crypto */
699 +static int dcp_bootstream_open(struct inode *inode, struct file *file)
701 + file->private_data = container_of((file->private_data),
702 + struct dcp_dev, dcp_bootstream_misc);
706 +static long dcp_bootstream_ioctl(struct file *file,
707 + unsigned int cmd, unsigned long arg)
709 + struct dcp_dev *dev = (struct dcp_dev *) file->private_data;
710 + void __user *argp = (void __user *)arg;
716 + if (cmd != DBS_ENC && cmd != DBS_DEC)
719 + if (copy_from_user(dev->payload_base, argp, 16))
722 + if (test_and_set_bit(DCP_FLAG_BUSY, &dev->flags))
725 + dev->ctx = kzalloc(sizeof(struct dcp_op), GFP_KERNEL);
728 + "cannot allocate context for OTP crypto");
729 + clear_bit(DCP_FLAG_BUSY, &dev->flags);
733 + dev->ctx->flags = DCP_AES | DCP_ECB | DCP_OTP_KEY | DCP_CBC_INIT;
734 + dev->ctx->flags |= (cmd == DBS_ENC) ? DCP_ENC : DCP_DEC;
735 + dev->hw_pkg[0]->src = dev->payload_base_dma;
736 + dev->hw_pkg[0]->dst = dev->payload_base_dma;
737 + dev->hw_pkg[0]->size = 16;
739 + dcp_op_start(dev, 0);
741 + while (test_bit(DCP_FLAG_BUSY, &dev->flags))
744 + ret = dev->ctx->stat;
745 + if (!ret && copy_to_user(argp, dev->payload_base, 16))
753 +static const struct file_operations dcp_bootstream_fops = {
754 + .owner = THIS_MODULE,
755 + .unlocked_ioctl = dcp_bootstream_ioctl,
756 + .open = dcp_bootstream_open,
759 +static int dcp_probe(struct platform_device *pdev)
761 + struct dcp_dev *dev = NULL;
762 + struct resource *r;
765 + dev = kzalloc(sizeof(*dev), GFP_KERNEL);
767 + dev_err(&pdev->dev, "Failed to allocate structure\n");
772 + dev->dev = &pdev->dev;
774 + platform_set_drvdata(pdev, dev);
776 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
778 + dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n");
782 + dev->dcp_regs_base = ioremap(r->start, resource_size(r));
785 + dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL);
787 + dcp_clear(dev, DCP_CTRL_SFRST | DCP_CTRL_CLKGATE, DCP_REG_CTRL);
789 + dcp_write(dev, DCP_CTRL_GATHER_RES_WRITE |
790 + DCP_CTRL_ENABLE_CONTEXT_CACHE | DCP_CTRL_CH_IRQ_E_1,
793 + dcp_write(dev, DCP_CHAN_CTRL_ENABLE_1, DCP_REG_CHAN_CTRL);
795 + for (i = 0; i < 4; i++)
796 + dcp_clear(dev, -1, dcp_chan_reg(DCP_REG_CHAN_STAT, i));
798 + dcp_clear(dev, -1, DCP_REG_STAT);
801 + r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
803 + dev_err(&pdev->dev, "can't get IRQ resource (0)\n");
805 + goto err_unmap_mem;
807 + dev->dcp_vmi_irq = r->start;
808 + ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev);
810 + dev_err(&pdev->dev, "can't request_irq (0)\n");
812 + goto err_unmap_mem;
815 + r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
817 + dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
819 + goto err_free_irq0;
821 + dev->dcp_irq = r->start;
822 + ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev);
824 + dev_err(&pdev->dev, "can't request_irq (1)\n");
826 + goto err_free_irq0;
829 + dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev,
830 + DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
833 + if (!dev->hw_pkg[0]) {
834 + dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
836 + goto err_free_irq1;
839 + for (i = 1; i < DCP_MAX_PKG; i++) {
840 + dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg
841 + + i * sizeof(struct dcp_hw_packet);
842 + dev->hw_pkg[i] = dev->hw_pkg[i - 1] + 1;
844 + dev->hw_pkg[i - 1]->next = dev->hw_phys_pkg;
847 + dev->payload_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
848 + &dev->payload_base_dma, GFP_KERNEL);
849 + if (!dev->payload_base) {
850 + dev_err(&pdev->dev, "Could not allocate memory for key\n");
852 + goto err_free_hw_packet;
854 + tasklet_init(&dev->queue_task, dcp_queue_task,
855 + (unsigned long) dev);
856 + tasklet_init(&dev->done_task, dcp_done_task,
857 + (unsigned long) dev);
858 + spin_lock_init(&dev->queue_lock);
860 + crypto_init_queue(&dev->queue, 10);
862 + init_timer(&dev->watchdog);
863 + dev->watchdog.function = &dcp_watchdog;
864 + dev->watchdog.data = (unsigned long)dev;
866 + dev->dcp_bootstream_misc.minor = MISC_DYNAMIC_MINOR,
867 + dev->dcp_bootstream_misc.name = "dcpboot",
868 + dev->dcp_bootstream_misc.fops = &dcp_bootstream_fops,
869 + ret = misc_register(&dev->dcp_bootstream_misc);
871 + dev_err(dev->dev, "Unable to register misc device\n");
872 + goto err_free_key_iv;
875 + for (i = 0; i < ARRAY_SIZE(algs); i++) {
876 + algs[i].cra_priority = 300;
877 + algs[i].cra_ctxsize = sizeof(struct dcp_op);
878 + algs[i].cra_module = THIS_MODULE;
879 + algs[i].cra_init = dcp_cra_init;
880 + algs[i].cra_exit = dcp_cra_exit;
881 + if (crypto_register_alg(&algs[i])) {
882 + dev_err(&pdev->dev, "register algorithm failed\n");
884 + goto err_unregister;
887 + dev_notice(&pdev->dev, "DCP crypto enabled.!\n");
892 + for (j = 0; j < i; j++)
893 + crypto_unregister_alg(&algs[j]);
895 + dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
896 + dev->payload_base_dma);
898 + dma_free_coherent(&pdev->dev, DCP_MAX_PKG *
899 + sizeof(struct dcp_hw_packet), dev->hw_pkg[0],
902 + free_irq(dev->dcp_irq, dev);
904 + free_irq(dev->dcp_vmi_irq, dev);
906 + iounmap((void *) dev->dcp_regs_base);
913 +static int dcp_remove(struct platform_device *pdev)
915 + struct dcp_dev *dev;
917 + dev = platform_get_drvdata(pdev);
918 + platform_set_drvdata(pdev, NULL);
920 + dma_free_coherent(&pdev->dev,
921 + DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
922 + dev->hw_pkg[0], dev->hw_phys_pkg);
924 + dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
925 + dev->payload_base_dma);
927 + free_irq(dev->dcp_irq, dev);
928 + free_irq(dev->dcp_vmi_irq, dev);
930 + tasklet_kill(&dev->done_task);
931 + tasklet_kill(&dev->queue_task);
933 + iounmap((void *) dev->dcp_regs_base);
935 + for (j = 0; j < ARRAY_SIZE(algs); j++)
936 + crypto_unregister_alg(&algs[j]);
938 + misc_deregister(&dev->dcp_bootstream_misc);
944 +static struct of_device_id fs_dcp_of_match[] = {
945 + { .compatible = "fsl-dcp"},
949 +static struct platform_driver fs_dcp_driver = {
950 + .probe = dcp_probe,
951 + .remove = dcp_remove,
954 + .owner = THIS_MODULE,
955 + .of_match_table = fs_dcp_of_match
959 +module_platform_driver(fs_dcp_driver);
962 +MODULE_AUTHOR("Tobias Rauter <tobias.rau...@gmail.com>");
963 +MODULE_DESCRIPTION("Freescale DCP Crypto Driver");
964 +MODULE_LICENSE("GPL");