[PATCH 4/7] crypto: Fix uninitialized skcipher_walk use in aesni-intel_glue

From: Yuran Pereira
Date: Thu Nov 02 2023 - 00:10:41 EST


In the following functions in aesni-intel_glue.c:
- `ecb_encrypt`
- `ecb_decrypt`
- `cbc_encrypt`
- `cbc_decrypt`
- `cts_cbc_encrypt`
- `cts_cbc_decrypt`
- `ctr_crypt`
- `xctr_crypt`
- `xts_crypt`

(`sm4_avx_ctr_crypt` has the same issue, but it lives in a different
glue file and is addressed by a separate patch in this series.)

`struct skcipher_walk walk` is not fully initialized before its use.

Although `skcipher_walk_virt()` and the functions it calls initialize
some fields of this struct, there is a chance that
`skcipher_walk_virt()` returns without fully clearing or properly
initializing the `->flags` field. This means that the following flags:
`SKCIPHER_WALK_DIFF`, `SKCIPHER_WALK_COPY` and `SKCIPHER_WALK_SLOW`
could be holding junk values by the time `skcipher_walk_done()` is
called.

This could lead to buggy or undefined behaviour since these flags
are checked in `skcipher_walk_done()`:

```C
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
...
if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
SKCIPHER_WALK_SLOW |
SKCIPHER_WALK_COPY |
SKCIPHER_WALK_DIFF)))) {
...
}
```

To prevent this, this patch ensures that instances of
`struct skcipher_walk` are correctly initialized prior to their use.

Addresses-Coverity-IDs: 139545, 1508669 ("Uninitialized scalar variable")
Signed-off-by: Yuran Pereira <yuran.pereira@xxxxxxxxxxx>
---
arch/x86/crypto/aesni-intel_glue.c | 12 ++++++++++++
1 file changed, 12 insertions(+)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 39d6a62ac627..d2c5f00a33ff 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -35,6 +35,7 @@
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/static_call.h>
+#include <linux/string.h>


#define AESNI_ALIGN 16
@@ -296,6 +297,7 @@ static int ecb_encrypt(struct skcipher_request *req)
unsigned int nbytes;
int err;

+ memset(&walk, 0, sizeof(walk));
err = skcipher_walk_virt(&walk, req, false);

while ((nbytes = walk.nbytes)) {
@@ -318,6 +320,7 @@ static int ecb_decrypt(struct skcipher_request *req)
unsigned int nbytes;
int err;

+ memset(&walk, 0, sizeof(walk));
err = skcipher_walk_virt(&walk, req, false);

while ((nbytes = walk.nbytes)) {
@@ -340,6 +343,7 @@ static int cbc_encrypt(struct skcipher_request *req)
unsigned int nbytes;
int err;

+ memset(&walk, 0, sizeof(walk));
err = skcipher_walk_virt(&walk, req, false);

while ((nbytes = walk.nbytes)) {
@@ -362,6 +366,7 @@ static int cbc_decrypt(struct skcipher_request *req)
unsigned int nbytes;
int err;

+ memset(&walk, 0, sizeof(walk));
err = skcipher_walk_virt(&walk, req, false);

while ((nbytes = walk.nbytes)) {
@@ -387,6 +392,8 @@ static int cts_cbc_encrypt(struct skcipher_request *req)
struct skcipher_walk walk;
int err;

+ memset(&walk, 0, sizeof(walk));
+
skcipher_request_set_tfm(&subreq, tfm);
skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
NULL, NULL);
@@ -443,6 +450,8 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
struct skcipher_walk walk;
int err;

+ memset(&walk, 0, sizeof(walk));
+
skcipher_request_set_tfm(&subreq, tfm);
skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
NULL, NULL);
@@ -515,6 +524,7 @@ static int ctr_crypt(struct skcipher_request *req)
unsigned int nbytes;
int err;

+ memset(&walk, 0, sizeof(walk));
err = skcipher_walk_virt(&walk, req, false);

while ((nbytes = walk.nbytes) > 0) {
@@ -566,6 +576,7 @@ static int xctr_crypt(struct skcipher_request *req)
int err;
__le32 block[AES_BLOCK_SIZE / sizeof(__le32)];

+ memset(&walk, 0, sizeof(walk));
err = skcipher_walk_virt(&walk, req, false);

while ((nbytes = walk.nbytes) > 0) {
@@ -912,6 +923,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
if (req->cryptlen < AES_BLOCK_SIZE)
return -EINVAL;

+ memset(&walk, 0, sizeof(walk));
err = skcipher_walk_virt(&walk, req, false);
if (!walk.nbytes)
return err;
--
2.25.1