Diffstat (limited to 'drivers/crypto/caam')
-rw-r--r--  drivers/crypto/caam/caamalg.c      | 231
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c |   4
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c   | 227
-rw-r--r--  drivers/crypto/caam/caampkc.c      |  71
-rw-r--r--  drivers/crypto/caam/caampkc.h      |   8
-rw-r--r--  drivers/crypto/caam/ctrl.c         |  78
-rw-r--r--  drivers/crypto/caam/ctrl.h         |   2
-rw-r--r--  drivers/crypto/caam/intern.h       |   1
-rw-r--r--  drivers/crypto/caam/qi.c           |   5
-rw-r--r--  drivers/crypto/caam/regs.h         |   6
10 files changed, 361 insertions(+), 272 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 7207a535942dc..d67667970f7e2 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -769,15 +769,18 @@ struct aead_edesc {
* @src_nents: number of segments in input s/w scatterlist
* @dst_nents: number of segments in output s/w scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
+ * @iv_dir: DMA mapping direction for IV
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
+ * and IV
*/
struct ablkcipher_edesc {
int src_nents;
int dst_nents;
dma_addr_t iv_dma;
+ enum dma_data_direction iv_dir;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
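The layout documented above places the IV at the tail of the single edesc allocation, after the job descriptor and the S/G link table, so the IV always sits in DMA-able memory no matter where the caller's buffer lives. A minimal userspace sketch of that pointer arithmetic (the sizes are illustrative assumptions, not the driver's real values):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t desc_bytes = 64;        /* h/w job descriptor area */
        size_t sec4_sg_bytes = 3 * 16; /* link table: 3 entries, 16B each */
        size_t ivsize = 16;            /* e.g. AES block size */

        /* one allocation, as in kzalloc(sizeof(*edesc) + desc_bytes +
         * sec4_sg_bytes + ivsize, ...) */
        unsigned char *edesc = calloc(1, desc_bytes + sec4_sg_bytes + ivsize);
        if (!edesc)
            return 1;

        /* the IV lands right after the descriptor and link table */
        unsigned char *iv = edesc + desc_bytes + sec4_sg_bytes;
        memset(iv, 0xab, ivsize);

        printf("IV offset within allocation: %zu\n", (size_t)(iv - edesc));
        free(edesc);
        return 0;
    }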
@@ -787,7 +790,8 @@ struct ablkcipher_edesc {
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents,
- dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
+ dma_addr_t iv_dma, int ivsize,
+ enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
int sec4_sg_bytes)
{
if (dst != src) {
@@ -799,7 +803,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
if (sec4_sg_bytes)
dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
DMA_TO_DEVICE);
@@ -810,7 +814,7 @@ static void aead_unmap(struct device *dev,
struct aead_request *req)
{
caam_unmap(dev, req->src, req->dst,
- edesc->src_nents, edesc->dst_nents, 0, 0,
+ edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
@@ -823,7 +827,7 @@ static void ablkcipher_unmap(struct device *dev,
caam_unmap(dev, req->src, req->dst,
edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize,
+ edesc->iv_dma, ivsize, edesc->iv_dir,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
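caam_unmap() now takes the mapping direction instead of hard-coding DMA_TO_DEVICE: a caller-supplied IV is mapped to the device, a HW-generated (GIVCIPHER) IV is mapped from it, and AEAD paths that map no IV pass DMA_NONE. A hedged userspace stand-in for that bookkeeping (the DMA API calls are simulated):

    #include <stdio.h>

    enum dma_data_direction {
        DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_NONE
    };

    struct edesc_model {
        unsigned long iv_dma;          /* 0 means no IV was mapped */
        enum dma_data_direction iv_dir;
    };

    static void unmap_iv(const struct edesc_model *e)
    {
        if (!e->iv_dma)
            return;                    /* nothing mapped, direction unused */
        printf("unmap IV @0x%lx, direction %d (mirrors map time)\n",
               e->iv_dma, (int)e->iv_dir);
    }

    int main(void)
    {
        struct edesc_model enc = { 0x1000, DMA_TO_DEVICE };   /* given IV */
        struct edesc_model giv = { 0x2000, DMA_FROM_DEVICE }; /* generated IV */

        unmap_iv(&enc);
        unmap_iv(&giv);
        return 0;
    }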
@@ -912,6 +916,18 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
ivsize, 0);
+ /* In case initial IV was generated, copy it in GIVCIPHER request */
+ if (edesc->iv_dir == DMA_FROM_DEVICE) {
+ u8 *iv;
+ struct skcipher_givcrypt_request *greq;
+
+ greq = container_of(req, struct skcipher_givcrypt_request,
+ creq);
+ iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
+ edesc->sec4_sg_bytes;
+ memcpy(greq->giv, iv, ivsize);
+ }
+
kfree(edesc);
ablkcipher_request_complete(req, err);
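The callback recovers the enclosing skcipher_givcrypt_request from the embedded ablkcipher_request via container_of(), then copies the device-generated IV out of the edesc tail. A self-contained sketch of the container_of() pattern (toy struct names, and a minimal macro without the kernel's type checking):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct creq { int nbytes; };
    struct givcrypt_req { unsigned char giv[16]; struct creq creq; };

    int main(void)
    {
        struct givcrypt_req greq = { .creq = { .nbytes = 64 } };
        struct creq *req = &greq.creq; /* what the callback receives */

        /* recover the enclosing request from the embedded member */
        struct givcrypt_req *back =
            container_of(req, struct givcrypt_req, creq);
        printf("%s\n", back == &greq ? "recovered enclosing struct" : "bug");
        return 0;
    }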
@@ -922,10 +938,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
struct ablkcipher_request *req = context;
struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
@@ -943,14 +959,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
ablkcipher_unmap(jrdev, edesc, req);
-
- /*
- * The crypto API expects us to set the IV (req->info) to the last
- * ciphertext block.
- */
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
- ivsize, 0);
-
kfree(edesc);
ablkcipher_request_complete(req, err);
@@ -1099,15 +1107,14 @@ static void init_authenc_job(struct aead_request *req,
*/
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req,
- bool iv_contig)
+ struct ablkcipher_request *req)
{
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
u32 *desc = edesc->hw_desc;
- u32 out_options = 0, in_options;
- dma_addr_t dst_dma, src_dma;
- int len, sec4_sg_index = 0;
+ u32 out_options = 0;
+ dma_addr_t dst_dma;
+ int len;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
@@ -1123,30 +1130,18 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- if (iv_contig) {
- src_dma = edesc->iv_dma;
- in_options = 0;
- } else {
- src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents + 1;
- in_options = LDST_SGF;
- }
- append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
+ LDST_SGF);
if (likely(req->src == req->dst)) {
- if (edesc->src_nents == 1 && iv_contig) {
- dst_dma = sg_dma_address(req->src);
- } else {
- dst_dma = edesc->sec4_sg_dma +
- sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
- }
+ dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
} else {
if (edesc->dst_nents == 1) {
dst_dma = sg_dma_address(req->dst);
} else {
- dst_dma = edesc->sec4_sg_dma +
- sec4_sg_index * sizeof(struct sec4_sg_entry);
+ dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
+ sizeof(struct sec4_sg_entry);
out_options = LDST_SGF;
}
}
@@ -1158,13 +1153,12 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
*/
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req,
- bool iv_contig)
+ struct ablkcipher_request *req)
{
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
u32 *desc = edesc->hw_desc;
- u32 out_options, in_options;
+ u32 in_options;
dma_addr_t dst_dma, src_dma;
int len, sec4_sg_index = 0;
@@ -1190,15 +1184,9 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
}
append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
- if (iv_contig) {
- dst_dma = edesc->iv_dma;
- out_options = 0;
- } else {
- dst_dma = edesc->sec4_sg_dma +
- sec4_sg_index * sizeof(struct sec4_sg_entry);
- out_options = LDST_SGF;
- }
- append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
+ dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
+ sizeof(struct sec4_sg_entry);
+ append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
}
/*
@@ -1287,7 +1275,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
GFP_DMA | flags);
if (!edesc) {
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1491,8 +1479,7 @@ static int aead_decrypt(struct aead_request *req)
* allocate and map the ablkcipher extended descriptor for ablkcipher
*/
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
- *req, int desc_bytes,
- bool *iv_contig_out)
+ *req, int desc_bytes)
{
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
@@ -1501,8 +1488,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma = 0;
- bool in_contig;
+ dma_addr_t iv_dma;
+ u8 *iv;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
@@ -1546,33 +1533,20 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
}
}
- iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- if (mapped_src_nents == 1 &&
- iv_dma + ivsize == sg_dma_address(req->src)) {
- in_contig = true;
- sec4_sg_ents = 0;
- } else {
- in_contig = false;
- sec4_sg_ents = 1 + mapped_src_nents;
- }
+ sec4_sg_ents = 1 + mapped_src_nents;
dst_sg_idx = sec4_sg_ents;
sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
- /* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ /*
+ * allocate space for base edesc and hw desc commands, link tables, IV
+ */
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1581,13 +1555,24 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
+ edesc->iv_dir = DMA_TO_DEVICE;
- if (!in_contig) {
- dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->src, mapped_src_nents,
- edesc->sec4_sg + 1, 0);
+ /* Make sure IV is located in a DMAable area */
+ iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+ memcpy(iv, req->info, ivsize);
+
+ iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
}
+ dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+ sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
+
if (mapped_dst_nents > 1) {
sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
edesc->sec4_sg + dst_sg_idx, 0);
@@ -1598,7 +1583,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1611,7 +1596,6 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
sec4_sg_bytes, 1);
#endif
- *iv_contig_out = in_contig;
return edesc;
}
@@ -1621,19 +1605,16 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req)
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- bool iv_contig;
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &iv_contig);
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Create and submit job descriptor*/
- init_ablkcipher_job(ctx->sh_desc_enc,
- ctx->sh_desc_enc_dma, edesc, req, iv_contig);
+ init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -1657,20 +1638,25 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
struct ablkcipher_edesc *edesc;
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
struct device *jrdev = ctx->jrdev;
- bool iv_contig;
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &iv_contig);
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block.
+ */
+ scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+ ivsize, 0);
+
/* Create and submit job descriptor*/
- init_ablkcipher_job(ctx->sh_desc_dec,
- ctx->sh_desc_dec_dma, edesc, req, iv_contig);
+ init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
desc = edesc->hw_desc;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
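The copy of the last ciphertext block into req->info now happens before the job is submitted: for in-place decryption the engine overwrites req->src with plaintext, so the read-back deleted from ablkcipher_decrypt_done() could return garbage. A toy CBC example of why that block must be saved first (identity block cipher, purely illustrative):

    #include <stdio.h>
    #include <string.h>

    #define BS 4 /* toy block size */

    /* CBC built on the identity cipher: P[i] = C[i] ^ C[i-1] */
    static void cbc_decrypt(unsigned char *buf, size_t len,
                            const unsigned char *iv)
    {
        unsigned char prev[BS], cur[BS];
        size_t i, j;

        memcpy(prev, iv, BS);
        for (i = 0; i < len; i += BS) {
            memcpy(cur, buf + i, BS);    /* ciphertext block */
            for (j = 0; j < BS; j++)
                buf[i + j] ^= prev[j];   /* in-place: ciphertext is gone */
            memcpy(prev, cur, BS);
        }
    }

    int main(void)
    {
        unsigned char iv[BS] = "abcd";
        unsigned char data[2 * BS] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned char next_iv[BS];

        /* save the last ciphertext block *before* in-place decryption,
         * as the patch does with scatterwalk_map_and_copy() */
        memcpy(next_iv, data + sizeof(data) - BS, BS);

        cbc_decrypt(data, sizeof(data), iv);
        printf("next IV: %02x%02x%02x%02x\n",
               next_iv[0], next_iv[1], next_iv[2], next_iv[3]);
        return 0;
    }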
@@ -1695,8 +1681,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
*/
static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
struct skcipher_givcrypt_request *greq,
- int desc_bytes,
- bool *iv_contig_out)
+ int desc_bytes)
{
struct ablkcipher_request *req = &greq->creq;
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
@@ -1706,8 +1691,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma = 0;
- bool out_contig;
+ dma_addr_t iv_dma;
+ u8 *iv;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
@@ -1752,36 +1737,20 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
}
}
- /*
- * Check if iv can be contiguous with source and destination.
- * If so, include it. If not, create scatterlist.
- */
- iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
dst_sg_idx = sec4_sg_ents;
- if (mapped_dst_nents == 1 &&
- iv_dma + ivsize == sg_dma_address(req->dst)) {
- out_contig = true;
- } else {
- out_contig = false;
- sec4_sg_ents += 1 + mapped_dst_nents;
- }
+ sec4_sg_ents += 1 + mapped_dst_nents;
- /* allocate space for base edesc and hw desc commands, link tables */
+ /*
+ * allocate space for base edesc and hw desc commands, link tables, IV
+ */
sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1790,24 +1759,33 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
+ edesc->iv_dir = DMA_FROM_DEVICE;
+
+ /* Make sure IV is located in a DMAable area */
+ iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+ iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, iv_dma)) {
+ dev_err(jrdev, "unable to map IV\n");
+ caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, DMA_NONE, 0, 0);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
if (mapped_src_nents > 1)
sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
0);
- if (!out_contig) {
- dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
- iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
- edesc->sec4_sg + dst_sg_idx + 1, 0);
- }
+ dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
+ sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
+ dst_sg_idx + 1, 0);
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, 0, 0);
+ iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1820,7 +1798,6 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
sec4_sg_bytes, 1);
#endif
- *iv_contig_out = out_contig;
return edesc;
}
@@ -1831,19 +1808,17 @@ static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
- bool iv_contig = false;
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
- edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &iv_contig);
+ edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Create and submit job descriptor*/
init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
- edesc, req, iv_contig);
+ edesc, req);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ablkcipher jobdesc@" __stringify(__LINE__) ": ",
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 8ae7a1be7dfd1..a408edd84f346 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1093,7 +1093,7 @@ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
(0x6 << MOVE_LEN_SHIFT));
write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
+ (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
/* Will read assoclen + cryptlen bytes */
append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -1178,7 +1178,7 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
(0x6 << MOVE_LEN_SHIFT));
write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
+ (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
/* Will read assoclen + cryptlen bytes */
append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index cacda0831390c..6e61cc93c2b0d 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -728,7 +728,7 @@ badkey:
* @assoclen: associated data length, in CAAM endianness
* @assoclen_dma: bus physical mapped address of req->assoclen
* @drv_req: driver-specific request structure
- * @sgt: the h/w link table
+ * @sgt: the h/w link table, followed by IV
*/
struct aead_edesc {
int src_nents;
@@ -739,9 +739,6 @@ struct aead_edesc {
unsigned int assoclen;
dma_addr_t assoclen_dma;
struct caam_drv_req drv_req;
-#define CAAM_QI_MAX_AEAD_SG \
- ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
- sizeof(struct qm_sg_entry))
struct qm_sg_entry sgt[0];
};
@@ -753,7 +750,7 @@ struct aead_edesc {
* @qm_sg_bytes: length of dma mapped h/w link table
* @qm_sg_dma: bus physical mapped address of h/w link table
* @drv_req: driver-specific request structure
- * @sgt: the h/w link table
+ * @sgt: the h/w link table, followed by IV
*/
struct ablkcipher_edesc {
int src_nents;
@@ -762,9 +759,6 @@ struct ablkcipher_edesc {
int qm_sg_bytes;
dma_addr_t qm_sg_dma;
struct caam_drv_req drv_req;
-#define CAAM_QI_MAX_ABLKCIPHER_SG \
- ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
- sizeof(struct qm_sg_entry))
struct qm_sg_entry sgt[0];
};
@@ -986,17 +980,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
}
}
- if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
+ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
ivsize = crypto_aead_ivsize(aead);
- iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(qidev, iv_dma)) {
- dev_err(qidev, "unable to map IV\n");
- caam_unmap(qidev, req->src, req->dst, src_nents,
- dst_nents, 0, 0, op_type, 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
- }
- }
/*
* Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
@@ -1004,16 +989,33 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
*/
qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
(mapped_dst_nents > 1 ? mapped_dst_nents : 0);
- if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
- qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+ if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+ CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_ents, ivsize);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
- sg_table = &edesc->sgt[0];
- qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+ if (ivsize) {
+ u8 *iv = (u8 *)(sg_table + qm_sg_ents);
+
+ /* Make sure IV is located in a DMAable area */
+ memcpy(iv, req->iv, ivsize);
+
+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, iv_dma)) {
+ dev_err(qidev, "unable to map IV\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
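This bound check replaces the removed CAAM_QI_MAX_AEAD_SG constant: the IV now shares the qi_cache slab with the S/G table, so the test must account for both at once. A userspace sketch of the arithmetic (the MEMCACHE_SIZE stand-in value is an assumption):

    #include <stdio.h>
    #include <stddef.h>

    #define MEMCACHE_SIZE 768 /* stand-in for CAAM_QI_MEMCACHE_SIZE */

    struct qm_sg_entry { unsigned long long addr; unsigned int len, off; };

    struct edesc_model {
        int src_nents, dst_nents;
        struct qm_sg_entry sgt[]; /* S/G table, then IV, in one slab */
    };

    static int fits(int qm_sg_ents, int ivsize)
    {
        size_t need = offsetof(struct edesc_model, sgt) +
                      (size_t)qm_sg_ents * sizeof(struct qm_sg_entry) +
                      (size_t)ivsize;
        return need <= MEMCACHE_SIZE;
    }

    int main(void)
    {
        printf("4 S/G entries + 16B IV fit:  %d\n", fits(4, 16));
        printf("64 S/G entries + 16B IV fit: %d\n", fits(64, 16));
        return 0;
    }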
@@ -1166,15 +1168,27 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
#endif
ablkcipher_unmap(qidev, edesc, req);
- qi_cache_free(edesc);
+
+ /* In case initial IV was generated, copy it in GIVCIPHER request */
+ if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
+ u8 *iv;
+ struct skcipher_givcrypt_request *greq;
+
+ greq = container_of(req, struct skcipher_givcrypt_request,
+ creq);
+ iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
+ memcpy(greq->giv, iv, ivsize);
+ }
/*
* The crypto API expects us to set the IV (req->info) to the last
* ciphertext block. This is used e.g. by the CTS mode.
*/
- scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
- ivsize, 0);
+ if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
+ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
+ ivsize, ivsize, 0);
+ qi_cache_free(edesc);
ablkcipher_request_complete(req, status);
}
@@ -1189,9 +1203,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
struct ablkcipher_edesc *edesc;
dma_addr_t iv_dma;
- bool in_contig;
+ u8 *iv;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- int dst_sg_idx, qm_sg_ents;
+ int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
@@ -1238,55 +1252,53 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
}
}
- iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
- if (dma_mapping_error(qidev, iv_dma)) {
- dev_err(qidev, "unable to map IV\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- if (mapped_src_nents == 1 &&
- iv_dma + ivsize == sg_dma_address(req->src)) {
- in_contig = true;
- qm_sg_ents = 0;
- } else {
- in_contig = false;
- qm_sg_ents = 1 + mapped_src_nents;
- }
+ qm_sg_ents = 1 + mapped_src_nents;
dst_sg_idx = qm_sg_ents;
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
- if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
- qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+ if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+ ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_ents, ivsize);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
- /* allocate space for base edesc and link tables */
+ /* allocate space for base edesc, link tables and IV */
edesc = qi_cache_alloc(GFP_DMA | flags);
if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Make sure IV is located in a DMAable area */
+ sg_table = &edesc->sgt[0];
+ iv = (u8 *)(sg_table + qm_sg_ents);
+ memcpy(iv, req->info, ivsize);
+
+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, iv_dma)) {
+ dev_err(qidev, "unable to map IV\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
+ qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
- sg_table = &edesc->sgt[0];
- edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+ edesc->qm_sg_bytes = qm_sg_bytes;
edesc->drv_req.app_ctx = req;
edesc->drv_req.cbk = ablkcipher_done;
edesc->drv_req.drv_ctx = drv_ctx;
- if (!in_contig) {
- dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
- }
+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
if (mapped_dst_nents > 1)
sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
@@ -1304,20 +1316,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
fd_sgt = &edesc->drv_req.fd_sgt[0];
- if (!in_contig)
- dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
- ivsize + req->nbytes, 0);
- else
- dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
- 0);
+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
+ ivsize + req->nbytes, 0);
if (req->src == req->dst) {
- if (!in_contig)
- dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
- sizeof(*sg_table), req->nbytes, 0);
- else
- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
- req->nbytes, 0);
+ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
+ sizeof(*sg_table), req->nbytes, 0);
} else if (mapped_dst_nents > 1) {
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
sizeof(*sg_table), req->nbytes, 0);
@@ -1341,10 +1345,10 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
struct ablkcipher_edesc *edesc;
dma_addr_t iv_dma;
- bool out_contig;
+ u8 *iv;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
struct qm_sg_entry *sg_table, *fd_sgt;
- int dst_sg_idx, qm_sg_ents;
+ int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
struct caam_drv_ctx *drv_ctx;
drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
@@ -1392,46 +1396,45 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
mapped_dst_nents = src_nents;
}
- iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
- if (dma_mapping_error(qidev, iv_dma)) {
- dev_err(qidev, "unable to map IV\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
dst_sg_idx = qm_sg_ents;
- if (mapped_dst_nents == 1 &&
- iv_dma + ivsize == sg_dma_address(req->dst)) {
- out_contig = true;
- } else {
- out_contig = false;
- qm_sg_ents += 1 + mapped_dst_nents;
- }
- if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
- qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, GIVENCRYPT, 0, 0);
+ qm_sg_ents += 1 + mapped_dst_nents;
+ qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+ if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+ ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_ents, ivsize);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
- /* allocate space for base edesc and link tables */
+ /* allocate space for base edesc, link tables and IV */
edesc = qi_cache_alloc(GFP_DMA | flags);
if (!edesc) {
dev_err(qidev, "could not allocate extended descriptor\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, GIVENCRYPT, 0, 0);
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Make sure IV is located in a DMAable area */
+ sg_table = &edesc->sgt[0];
+ iv = (u8 *)(sg_table + qm_sg_ents);
+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
+ if (dma_mapping_error(qidev, iv_dma)) {
+ dev_err(qidev, "unable to map IV\n");
+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0, 0);
+ qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
- sg_table = &edesc->sgt[0];
- edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+ edesc->qm_sg_bytes = qm_sg_bytes;
edesc->drv_req.app_ctx = req;
edesc->drv_req.cbk = ablkcipher_done;
edesc->drv_req.drv_ctx = drv_ctx;
@@ -1439,11 +1442,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
if (mapped_src_nents > 1)
sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
- if (!out_contig) {
- dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
- dst_sg_idx + 1, 0);
- }
+ dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
+ 0);
edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
DMA_TO_DEVICE);
@@ -1464,13 +1465,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
req->nbytes, 0);
- if (!out_contig)
- dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
- sizeof(*sg_table), ivsize + req->nbytes,
- 0);
- else
- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
- ivsize + req->nbytes, 0);
+ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+ sizeof(*sg_table), ivsize + req->nbytes, 0);
return edesc;
}
@@ -1480,6 +1476,7 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
struct ablkcipher_edesc *edesc;
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
int ret;
if (unlikely(caam_congested))
@@ -1490,6 +1487,14 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
if (IS_ERR(edesc))
return PTR_ERR(edesc);
+ /*
+ * The crypto API expects us to set the IV (req->info) to the last
+ * ciphertext block.
+ */
+ if (!encrypt)
+ scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
+ ivsize, ivsize, 0);
+
ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
if (!ret) {
ret = -EINPROGRESS;
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 7a897209f1813..578ea63a31098 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -66,7 +66,7 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
struct caam_rsa_key *key = &ctx->key;
struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
size_t p_sz = key->p_sz;
- size_t q_sz = key->p_sz;
+ size_t q_sz = key->q_sz;
dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
@@ -83,7 +83,7 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
struct caam_rsa_key *key = &ctx->key;
struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
size_t p_sz = key->p_sz;
- size_t q_sz = key->p_sz;
+ size_t q_sz = key->q_sz;
dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
@@ -166,18 +166,71 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
akcipher_request_complete(req, err);
}
+static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
+ unsigned int nbytes,
+ unsigned int flags)
+{
+ struct sg_mapping_iter miter;
+ int lzeros, ents;
+ unsigned int len;
+ unsigned int tbytes = nbytes;
+ const u8 *buff;
+
+ ents = sg_nents_for_len(sgl, nbytes);
+ if (ents < 0)
+ return ents;
+
+ sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);
+
+ lzeros = 0;
+ len = 0;
+ while (nbytes > 0) {
+ while (len && !*buff) {
+ lzeros++;
+ len--;
+ buff++;
+ }
+
+ if (len && *buff)
+ break;
+
+ sg_miter_next(&miter);
+ buff = miter.addr;
+ len = miter.length;
+
+ nbytes -= lzeros;
+ lzeros = 0;
+ }
+
+ miter.consumed = lzeros;
+ sg_miter_stop(&miter);
+ nbytes -= lzeros;
+
+ return tbytes - nbytes;
+}
+
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
size_t desclen)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
struct device *dev = ctx->dev;
+ struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
struct rsa_edesc *edesc;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
+ int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
int sgc;
int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
int src_nents, dst_nents;
+ int lzeros;
+
+ lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
+ if (lzeros < 0)
+ return ERR_PTR(lzeros);
+
+ req->src_len -= lzeros;
+ req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
src_nents = sg_nents_for_len(req->src, req->src_len);
dst_nents = sg_nents_for_len(req->dst, req->dst_len);
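caam_rsa_count_leading_zeros() above walks the scatterlist with an sg_miter and counts the zero bytes to strip, so rsa_edesc_alloc() can fast-forward req->src past them and never feed the engine an input longer than the modulus. The same logic over a flat buffer, as a small sketch:

    #include <stdio.h>
    #include <stddef.h>

    static size_t count_leading_zeros(const unsigned char *buf, size_t len)
    {
        size_t lz = 0;

        while (lz < len && !buf[lz])
            lz++; /* skip big-endian padding zeros */
        return lz;
    }

    int main(void)
    {
        unsigned char msg[] = { 0x00, 0x00, 0x12, 0x34 };
        size_t lz = count_leading_zeros(msg, sizeof(msg));

        printf("skip %zu bytes, effective length %zu\n",
               lz, sizeof(msg) - lz);
        return 0;
    }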
@@ -344,7 +397,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
int sec4_sg_index = 0;
size_t p_sz = key->p_sz;
- size_t q_sz = key->p_sz;
+ size_t q_sz = key->q_sz;
pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->d_dma)) {
@@ -419,7 +472,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
int sec4_sg_index = 0;
size_t p_sz = key->p_sz;
- size_t q_sz = key->p_sz;
+ size_t q_sz = key->q_sz;
pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
if (dma_mapping_error(dev, pdb->p_dma)) {
@@ -730,19 +783,12 @@ static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
*/
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
- u8 *val;
caam_rsa_drop_leading_zeros(&buf, nbytes);
if (!*nbytes)
return NULL;
- val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
- if (!val)
- return NULL;
-
- memcpy(val, buf, *nbytes);
-
- return val;
+ return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}
static int caam_rsa_check_key_length(unsigned int len)
@@ -953,6 +999,7 @@ static struct akcipher_alg caam_rsa = {
.max_size = caam_rsa_max_size,
.init = caam_rsa_init_tfm,
.exit = caam_rsa_exit_tfm,
+ .reqsize = sizeof(struct caam_rsa_req_ctx),
.base = {
.cra_name = "rsa",
.cra_driver_name = "rsa-caam",
diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
index fd145c46eae17..82645bcf8b27e 100644
--- a/drivers/crypto/caam/caampkc.h
+++ b/drivers/crypto/caam/caampkc.h
@@ -96,6 +96,14 @@ struct caam_rsa_ctx {
};
/**
+ * caam_rsa_req_ctx - per request context.
+ * @src: input scatterlist (stripped of leading zeros)
+ */
+struct caam_rsa_req_ctx {
+ struct scatterlist src[2];
+};
+
+/**
* rsa_edesc - s/w-extended rsa descriptor
* @src_nents : number of segments in input scatterlist
* @dst_nents : number of segments in output scatterlist
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index e4cc636e11044..538c01f428c16 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -322,9 +322,9 @@ static int caam_remove(struct platform_device *pdev)
/*
* De-initialize RNG state handles initialized by this driver.
- * In case of DPAA 2.x, RNG is managed by MC firmware.
+ * In case of SoCs with Management Complex, RNG is managed by MC f/w.
*/
- if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
+ if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
/* Shut down debug views */
@@ -396,11 +396,56 @@ start_rng:
clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
}
+static int caam_get_era_from_hw(struct caam_ctrl __iomem *ctrl)
+{
+ static const struct {
+ u16 ip_id;
+ u8 maj_rev;
+ u8 era;
+ } id[] = {
+ {0x0A10, 1, 1},
+ {0x0A10, 2, 2},
+ {0x0A12, 1, 3},
+ {0x0A14, 1, 3},
+ {0x0A14, 2, 4},
+ {0x0A16, 1, 4},
+ {0x0A10, 3, 4},
+ {0x0A11, 1, 4},
+ {0x0A18, 1, 4},
+ {0x0A11, 2, 5},
+ {0x0A12, 2, 5},
+ {0x0A13, 1, 5},
+ {0x0A1C, 1, 5}
+ };
+ u32 ccbvid, id_ms;
+ u8 maj_rev, era;
+ u16 ip_id;
+ int i;
+
+ ccbvid = rd_reg32(&ctrl->perfmon.ccb_id);
+ era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
+ if (era) /* This is '0' prior to CAAM ERA-6 */
+ return era;
+
+ id_ms = rd_reg32(&ctrl->perfmon.caam_id_ms);
+ ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
+ maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;
+
+ for (i = 0; i < ARRAY_SIZE(id); i++)
+ if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev)
+ return id[i].era;
+
+ return -ENOTSUPP;
+}
+
/**
* caam_get_era() - Return the ERA of the SEC on SoC, based
- * on "sec-era" propery in the DTS. This property is updated by u-boot.
+ * on "sec-era" optional property in the DTS. This property is updated
+ * by u-boot.
+ * In case this property is not passed an attempt to retrieve the CAAM
+ * era via register reads will be made.
**/
-int caam_get_era(void)
+static int caam_get_era(struct caam_ctrl __iomem *ctrl)
{
struct device_node *caam_node;
int ret;
@@ -410,9 +455,11 @@ int caam_get_era(void)
ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
of_node_put(caam_node);
- return ret ? -ENOTSUPP : prop;
+ if (!ret)
+ return prop;
+ else
+ return caam_get_era_from_hw(ctrl);
}
-EXPORT_SYMBOL(caam_get_era);
static const struct of_device_id caam_match[] = {
{
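When CCBVID reports era 0 (true for anything before Era 6), the fallback derives the era from the IP ID and major revision in SECVID_MS through the lookup table. A userspace model of that decode using the mask/shift values this series adds to regs.h (the register inputs below are made up, and only the first rows of the table are reproduced):

    #include <stdio.h>

    #define CCBVID_ERA_MASK         0xff000000
    #define CCBVID_ERA_SHIFT        24
    #define SECVID_MS_IPID_MASK     0xffff0000
    #define SECVID_MS_IPID_SHIFT    16
    #define SECVID_MS_MAJ_REV_MASK  0x0000ff00
    #define SECVID_MS_MAJ_REV_SHIFT 8

    static int era_from_ids(unsigned int ccbvid, unsigned int id_ms)
    {
        static const struct {
            unsigned short ip_id;
            unsigned char maj_rev, era;
        } id[] = {
            {0x0A10, 1, 1}, {0x0A10, 2, 2}, {0x0A12, 1, 3}, {0x0A14, 1, 3},
        };
        unsigned char era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
        unsigned short ip_id;
        unsigned char maj_rev;
        unsigned int i;

        if (era) /* non-zero from CAAM Era 6 onwards */
            return era;

        ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
        maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;
        for (i = 0; i < sizeof(id) / sizeof(id[0]); i++)
            if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev)
                return id[i].era;
        return -1; /* unknown: the driver returns -ENOTSUPP */
    }

    int main(void)
    {
        /* era field zero -> fall back to IP id 0x0A12, major rev 1 */
        printf("era = %d\n", era_from_ids(0x00000000, 0x0A120100));
        return 0;
    }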
@@ -571,11 +618,15 @@ static int caam_probe(struct platform_device *pdev)
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register.
- * In case of DPAA 2.x, Management Complex firmware performs
+ * In case of SoCs with Management Complex, MC f/w performs
* the configuration.
*/
caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
- if (!caam_dpaa2)
+ np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
+ ctrlpriv->mc_en = !!np;
+ of_node_put(np);
+
+ if (!ctrlpriv->mc_en)
clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
MCFGR_WDENABLE | MCFGR_LARGE_BURST |
@@ -623,7 +674,7 @@ static int caam_probe(struct platform_device *pdev)
goto iounmap_ctrl;
}
- ctrlpriv->era = caam_get_era();
+ ctrlpriv->era = caam_get_era(ctrl);
ret = of_platform_populate(nprop, caam_match, NULL, dev);
if (ret) {
@@ -686,9 +737,9 @@ static int caam_probe(struct platform_device *pdev)
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
* already instantiated, do RNG instantiation
- * In case of DPAA 2.x, RNG is managed by MC firmware.
+ * In case of SoCs with Management Complex, RNG is managed by MC f/w.
*/
- if (!caam_dpaa2 &&
+ if (!ctrlpriv->mc_en &&
(cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
ctrlpriv->rng4_sh_init =
rd_reg32(&ctrl->r4tst[0].rdsta);
@@ -757,9 +808,8 @@ static int caam_probe(struct platform_device *pdev)
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
ctrlpriv->era);
- dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
- ctrlpriv->total_jobrs, ctrlpriv->qi_present,
- caam_dpaa2 ? "yes" : "no");
+ dev_info(dev, "job rings = %d, qi = %d\n",
+ ctrlpriv->total_jobrs, ctrlpriv->qi_present);
#ifdef CONFIG_DEBUG_FS
debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
index be693a2cc25ef..f3ecd67922a72 100644
--- a/drivers/crypto/caam/ctrl.h
+++ b/drivers/crypto/caam/ctrl.h
@@ -9,8 +9,6 @@
#define CTRL_H
/* Prototypes for backend-level services exposed to APIs */
-int caam_get_era(void);
-
extern bool caam_dpaa2;
#endif /* CTRL_H */
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 7696a774a3628..babc78abd155f 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -82,6 +82,7 @@ struct caam_drv_private {
*/
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
+ u8 mc_en; /* Nonzero if MC f/w is active */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
int era; /* CAAM Era (internal HW revision) */
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index b9480828da385..67f7f8c42c932 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -657,9 +657,8 @@ static int init_cgr(struct device *qidev)
{
int ret;
struct qm_mcc_initcgr opts;
- const u64 cpus = *(u64 *)qman_affine_cpus();
- const int num_cpus = hweight64(cpus);
- const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
+ const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
+ MAX_RSP_FQ_BACKLOG_PER_CPU;
ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
if (ret) {
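The old code cast the affine cpumask to a u64 and ran hweight64() on it, which silently truncates once the mask is wider than 64 bits; cpumask_weight() counts bits across the whole mask. A portable userspace illustration (the MAX_RSP_FQ_BACKLOG_PER_CPU value is an assumption):

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_RSP_FQ_BACKLOG_PER_CPU 256 /* illustrative value */

    /* popcount over an arbitrarily wide mask, like cpumask_weight() */
    static unsigned int mask_weight(const uint64_t *mask, unsigned int words)
    {
        unsigned int w = 0, i;

        for (i = 0; i < words; i++)
            w += (unsigned int)__builtin_popcountll(mask[i]); /* GCC/Clang */
        return w;
    }

    int main(void)
    {
        uint64_t affine[2] = { 0xf, 0x1 }; /* CPUs 0-3 and CPU 64 */
        uint64_t val = (uint64_t)mask_weight(affine, 2) *
                       MAX_RSP_FQ_BACKLOG_PER_CPU;

        /* a bare u64 cast would have missed CPU 64 entirely */
        printf("CGR threshold: %llu\n", (unsigned long long)val);
        return 0;
    }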
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index fee363865d88e..4fb91ba39c36b 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -312,11 +312,17 @@ struct caam_perfmon {
/* Component Instantiation Parameters fe0-fff */
u32 rtic_id; /* RVID - RTIC Version ID */
+#define CCBVID_ERA_MASK 0xff000000
+#define CCBVID_ERA_SHIFT 24
u32 ccb_id; /* CCBVID - CCB Version ID */
u32 cha_id_ms; /* CHAVID - CHA Version ID Most Significant*/
u32 cha_id_ls; /* CHAVID - CHA Version ID Least Significant*/
u32 cha_num_ms; /* CHANUM - CHA Number Most Significant */
u32 cha_num_ls; /* CHANUM - CHA Number Least Significant*/
+#define SECVID_MS_IPID_MASK 0xffff0000
+#define SECVID_MS_IPID_SHIFT 16
+#define SECVID_MS_MAJ_REV_MASK 0x0000ff00
+#define SECVID_MS_MAJ_REV_SHIFT 8
u32 caam_id_ms; /* CAAMVID - CAAM Version ID MS */
u32 caam_id_ls; /* CAAMVID - CAAM Version ID LS */
};