[PATCH v2 08/14] crypto: sun8i-ce: split into prepare/run/unprepare

From: Corentin Labbe
Date: Fri Apr 24 2020 - 10:02:48 EST


This patch splits do_one_request into three parts.
The prepare step handles all DMA mapping and initialisation of the task
structure.
The unprepare step cleans up all of those DMA mappings.
do_one_request is then reduced to just running the task.
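For reference, a minimal stand-alone C sketch of the three-phase flow this
introduces; the types and names below are illustrative only and are not the
driver's or crypto_engine's real API:

#include <stdio.h>

/* Illustrative stand-ins only; not the kernel structures. */
struct engine { int busy; };
struct request { int mapped; int done; };

/* prepare: do all DMA mapping and fill the task descriptor. */
static int cipher_prepare(struct engine *e, struct request *r)
{
	r->mapped = 1;	/* stands in for dma_map_sg()/dma_map_single() */
	return 0;
}

/* run: nothing but starting the task on the hardware. */
static int cipher_run(struct engine *e, struct request *r)
{
	if (!r->mapped)
		return -1;
	r->done = 1;	/* stands in for sun8i_ce_run_task() */
	return 0;
}

/* unprepare: undo every mapping created in prepare. */
static int cipher_unprepare(struct engine *e, struct request *r)
{
	r->mapped = 0;	/* stands in for dma_unmap_sg()/dma_unmap_single() */
	return 0;
}

int main(void)
{
	struct engine e = { 0 };
	struct request r = { 0, 0 };

	/* The engine drives the three hooks in this fixed order. */
	cipher_prepare(&e, &r);
	cipher_run(&e, &r);
	cipher_unprepare(&e, &r);
	printf("done=%d mapped=%d\n", r.done, r.mapped);
	return 0;
}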

Signed-off-by: Corentin Labbe <clabbe@xxxxxxxxxxxx>
---
.../allwinner/sun8i-ce/sun8i-ce-cipher.c | 70 ++++++++++++++++---
drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 4 ++
2 files changed, 66 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 7716fa2d3250..d662dac83361 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -78,8 +78,9 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
return err;
}

-static int sun8i_ce_cipher(struct skcipher_request *areq)
+static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
{
+ struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun8i_ce_dev *ce = op->ce;
@@ -237,7 +238,9 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
}

chan->timeout = areq->cryptlen;
- err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+ rctx->nr_sgs = nr_sgs;
+ rctx->nr_sgd = nr_sgd;
+ return 0;

theend_sgs:
if (areq->src == areq->dst) {
@@ -271,13 +274,64 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
return err;
}

-static int sun8i_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
+int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
- int err;
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
+ int flow, err;

- err = sun8i_ce_cipher(breq);
+ flow = rctx->flow;
+ err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
crypto_finalize_skcipher_request(engine, breq, err);
+ return 0;
+}
+
+static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
+{
+ struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+ struct sun8i_ce_flow *chan;
+ struct ce_task *cet;
+ unsigned int ivsize, offset;
+ int nr_sgs = rctx->nr_sgs;
+ int nr_sgd = rctx->nr_sgd;
+ int flow;
+
+ flow = rctx->flow;
+ chan = &ce->chanlist[flow];
+ cet = chan->tl;
+ ivsize = crypto_skcipher_ivsize(tfm);
+
+ if (areq->src == areq->dst) {
+ dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+ } else {
+ if (nr_sgs > 0)
+ dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+ }
+
+ if (areq->iv && ivsize > 0) {
+ if (cet->t_iv)
+ dma_unmap_single(ce->dev, cet->t_iv, rctx->ivlen,
+ DMA_TO_DEVICE);
+ offset = areq->cryptlen - ivsize;
+ if (rctx->op_dir & CE_DECRYPTION) {
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ kzfree(rctx->backup_iv);
+ } else {
+ scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+ ivsize, 0);
+ }
+ kfree(rctx->bounce_iv);
+ }
+
+ dma_unmap_single(ce->dev, cet->t_key, op->keylen, DMA_TO_DEVICE);

return 0;
}
@@ -347,9 +401,9 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
crypto_tfm_alg_driver_name(&sktfm->base),
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(&op->fallback_tfm->base)));

- op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request;
- op->enginectx.op.prepare_request = NULL;
- op->enginectx.op.unprepare_request = NULL;
+ op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
+ op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
+ op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;

err = pm_runtime_get_sync(op->ce->dev);
if (err < 0)
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index c9c7ef8299e2..fe97fee74e47 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -182,6 +182,8 @@ struct sun8i_ce_dev {
* @backup_iv: buffer which contain the next IV to store
* @bounce_iv: buffer which contain a copy of IV
* @ivlen: size of bounce_iv
+ * @nr_sgs: The number of source SG (as given by dma_map_sg())
+ * @nr_sgd: The number of destination SG (as given by dma_map_sg())
*/
struct sun8i_cipher_req_ctx {
u32 op_dir;
@@ -189,6 +191,8 @@ struct sun8i_cipher_req_ctx {
void *backup_iv;
void *bounce_iv;
unsigned int ivlen;
+ int nr_sgs;
+ int nr_sgd;
};

/*
--
2.26.2