Diffstat (limited to 'drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c')
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c  145
1 file changed, 78 insertions(+), 67 deletions(-)
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 13bdfb8a2c62..d01594353d9a 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -26,7 +26,7 @@
static void sun8i_ce_hash_stat_fb_inc(struct crypto_ahash *tfm)
{
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct sun8i_ce_alg_template *algt;
struct ahash_alg *alg = crypto_ahash_alg(tfm);
algt = container_of(alg, struct sun8i_ce_alg_template,
@@ -58,7 +58,8 @@ int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
crypto_ahash_set_reqsize(tfm,
sizeof(struct sun8i_ce_hash_reqctx) +
- crypto_ahash_reqsize(op->fallback_tfm));
+ crypto_ahash_reqsize(op->fallback_tfm) +
+ CRYPTO_DMA_PADDING);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
memcpy(algt->fbname,
@@ -84,7 +85,7 @@ void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm)
int sun8i_ce_hash_init(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -100,7 +101,7 @@ int sun8i_ce_hash_init(struct ahash_request *areq)
int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -114,7 +115,7 @@ int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -128,7 +129,7 @@ int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
int sun8i_ce_hash_final(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -145,7 +146,7 @@ int sun8i_ce_hash_final(struct ahash_request *areq)
int sun8i_ce_hash_update(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -160,7 +161,7 @@ int sun8i_ce_hash_update(struct ahash_request *areq)
int sun8i_ce_hash_finup(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -178,7 +179,7 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -238,19 +239,15 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
int sun8i_ce_hash_digest(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
- struct sun8i_ce_alg_template *algt;
- struct sun8i_ce_dev *ce;
+ struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+ struct sun8i_ce_dev *ce = ctx->ce;
struct crypto_engine *engine;
int e;
if (sun8i_ce_hash_need_fallback(areq))
return sun8i_ce_hash_digest_fb(areq);
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
- ce = algt->ce;
-
e = sun8i_ce_get_engine_number(ce);
rctx->flow = e;
engine = ce->chanlist[e].engine;
@@ -316,28 +313,22 @@ static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count,
return j;
}
-int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+static int sun8i_ce_hash_prepare(struct ahash_request *areq, struct ce_task *cet)
{
- struct ahash_request *areq = container_of(breq, struct ahash_request, base);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct sun8i_ce_alg_template *algt;
struct sun8i_ce_dev *ce;
- struct sun8i_ce_flow *chan;
- struct ce_task *cet;
struct scatterlist *sg;
- int nr_sgs, flow, err;
+ int nr_sgs, err;
unsigned int len;
u32 common;
u64 byte_count;
__le32 *bf;
- void *buf, *result;
int j, i, todo;
u64 bs;
int digestsize;
- dma_addr_t addr_res, addr_pad;
- int ns = sg_nents_for_len(areq->src, areq->nbytes);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
@@ -349,32 +340,16 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (digestsize == SHA384_DIGEST_SIZE)
digestsize = SHA512_DIGEST_SIZE;
- /* the padding could be up to two block. */
- buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
- if (!buf) {
- err = -ENOMEM;
- goto err_out;
- }
- bf = (__le32 *)buf;
-
- result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
- if (!result) {
- err = -ENOMEM;
- goto err_free_buf;
- }
-
- flow = rctx->flow;
- chan = &ce->chanlist[flow];
+ bf = (__le32 *)rctx->pad;
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
algt->stat_req++;
dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
- cet = chan->tl;
memset(cet, 0, sizeof(struct ce_task));
- cet->t_id = cpu_to_le32(flow);
+ cet->t_id = cpu_to_le32(rctx->flow);
common = ce->variant->alg_hash[algt->ce_algo_id];
common |= CE_COMM_INT;
cet->t_common_ctl = cpu_to_le32(common);
@@ -382,11 +357,12 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
cet->t_sym_ctl = 0;
cet->t_asym_ctl = 0;
- nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+ rctx->nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
+ nr_sgs = dma_map_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
- goto err_free_result;
+ goto err_out;
}
len = areq->nbytes;
@@ -401,10 +377,13 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err = -EINVAL;
goto err_unmap_src;
}
- addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
- cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
- cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
- if (dma_mapping_error(ce->dev, addr_res)) {
+
+ rctx->result_len = digestsize;
+ rctx->addr_res = dma_map_single(ce->dev, rctx->result, rctx->result_len,
+ DMA_FROM_DEVICE);
+ cet->t_dst[0].addr = desc_addr_val_le32(ce, rctx->addr_res);
+ cet->t_dst[0].len = cpu_to_le32(rctx->result_len / 4);
+ if (dma_mapping_error(ce->dev, rctx->addr_res)) {
dev_err(ce->dev, "DMA map dest\n");
err = -EINVAL;
goto err_unmap_src;
@@ -432,10 +411,12 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
goto err_unmap_result;
}
- addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
- cet->t_src[i].addr = desc_addr_val_le32(ce, addr_pad);
+ rctx->pad_len = j * 4;
+ rctx->addr_pad = dma_map_single(ce->dev, rctx->pad, rctx->pad_len,
+ DMA_TO_DEVICE);
+ cet->t_src[i].addr = desc_addr_val_le32(ce, rctx->addr_pad);
cet->t_src[i].len = cpu_to_le32(j);
- if (dma_mapping_error(ce->dev, addr_pad)) {
+ if (dma_mapping_error(ce->dev, rctx->addr_pad)) {
dev_err(ce->dev, "DMA error on padding SG\n");
err = -EINVAL;
goto err_unmap_result;
@@ -446,29 +427,59 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
else
cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);
- chan->timeout = areq->nbytes;
-
- err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
-
- dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
+ return 0;
err_unmap_result:
- dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
- if (!err)
- memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
+ dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
+ DMA_FROM_DEVICE);
err_unmap_src:
- dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
-err_free_result:
- kfree(result);
+err_out:
+ return err;
+}
-err_free_buf:
- kfree(buf);
+static void sun8i_ce_hash_unprepare(struct ahash_request *areq,
+ struct ce_task *cet)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sun8i_ce_dev *ce = ctx->ce;
+
+ dma_unmap_single(ce->dev, rctx->addr_pad, rctx->pad_len, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
+}
+
+int sun8i_ce_hash_run(struct crypto_engine *engine, void *async_req)
+{
+ struct ahash_request *areq = ahash_request_cast(async_req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+ struct sun8i_ce_dev *ce = ctx->ce;
+ struct sun8i_ce_flow *chan;
+ int err;
+
+ chan = &ce->chanlist[rctx->flow];
+
+ err = sun8i_ce_hash_prepare(areq, chan->tl);
+ if (err)
+ return err;
+
+ err = sun8i_ce_run_task(ce, rctx->flow, crypto_ahash_alg_name(tfm));
+
+ sun8i_ce_hash_unprepare(areq, chan->tl);
+
+ if (!err)
+ memcpy(areq->result, rctx->result,
+ crypto_ahash_digestsize(tfm));
-err_out:
local_bh_disable();
- crypto_finalize_hash_request(engine, breq, err);
+ crypto_finalize_hash_request(engine, async_req, err);
local_bh_enable();
return 0;