[PATCH 4.4 044/103] ext4 crypto: don't let data integrity writebacks fail with ENOMEM

From: Greg Kroah-Hartman
Date: Tue May 23 2017 - 16:49:51 EST


4.4-stable review patch. If anyone has any objections, please let me know.

------------------

From: Theodore Ts'o <tytso@xxxxxxx>

commit c9af28fdd44922a6c10c9f8315718408af98e315 upstream.

We don't want the writeback triggered from the journal commit (in
data=writeback mode) to cause the journal to abort due to
generic_writepages() returning an ENOMEM error. In addition, if
fsync() fails with ENOMEM, most applications will probably not do the
right thing.

So if we are doing a data integrity sync, and ext4_encrypt() returns
ENOMEM, we will submit any queued I/O to date, and then retry the
allocation using GFP_NOFAIL.
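
Concretely, the write path in fs/ext4/page-io.c ends up with the
following shape (a condensed restatement of the hunk below, with the
surrounding error handling elided):

	gfp_t gfp_flags = GFP_NOFS;

retry_encrypt:
	data_page = ext4_encrypt(inode, page, gfp_flags);
	if (IS_ERR(data_page)) {
		ret = PTR_ERR(data_page);
		if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
			/* Submit whatever I/O is queued so far... */
			if (io->io_bio) {
				ext4_io_submit(io);
				congestion_wait(BLK_RW_ASYNC, HZ/50);
			}
			/* ...then retry with an allocation that cannot fail. */
			gfp_flags |= __GFP_NOFAIL;
			goto retry_encrypt;
		}
	}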

Google-Bug-Id: 27641567

Signed-off-by: Theodore Ts'o <tytso@xxxxxxx>
Signed-off-by: Eric Biggers <ebiggers@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
fs/ext4/crypto.c   | 37 +++++++++++++++++++++----------------
fs/ext4/ext4.h     |  6 ++++--
fs/ext4/page-io.c  | 14 +++++++++++++-
fs/ext4/readpage.c |  2 +-
4 files changed, 39 insertions(+), 20 deletions(-)

--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -94,7 +94,8 @@ void ext4_release_crypto_ctx(struct ext4
* Return: An allocated and initialized encryption context on success; error
* value or NULL otherwise.
*/
-struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode)
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
+ gfp_t gfp_flags)
{
struct ext4_crypto_ctx *ctx = NULL;
int res = 0;
@@ -121,7 +122,7 @@ struct ext4_crypto_ctx *ext4_get_crypto_
list_del(&ctx->free_list);
spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
if (!ctx) {
- ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
+ ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags);
if (!ctx) {
res = -ENOMEM;
goto out;
@@ -258,7 +259,8 @@ static int ext4_page_crypto(struct inode
ext4_direction_t rw,
pgoff_t index,
struct page *src_page,
- struct page *dest_page)
+ struct page *dest_page,
+ gfp_t gfp_flags)

{
u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
@@ -269,7 +271,7 @@ static int ext4_page_crypto(struct inode
struct crypto_ablkcipher *tfm = ci->ci_ctfm;
int res = 0;

- req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+ req = ablkcipher_request_alloc(tfm, gfp_flags);
if (!req) {
printk_ratelimited(KERN_ERR
"%s: crypto_request_alloc() failed\n",
@@ -310,9 +312,10 @@ static int ext4_page_crypto(struct inode
return 0;
}

-static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx)
+static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx,
+ gfp_t gfp_flags)
{
- ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT);
+ ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags);
if (ctx->w.bounce_page == NULL)
return ERR_PTR(-ENOMEM);
ctx->flags |= EXT4_WRITE_PATH_FL;
@@ -335,7 +338,8 @@ static struct page *alloc_bounce_page(st
* error value or NULL.
*/
struct page *ext4_encrypt(struct inode *inode,
- struct page *plaintext_page)
+ struct page *plaintext_page,
+ gfp_t gfp_flags)
{
struct ext4_crypto_ctx *ctx;
struct page *ciphertext_page = NULL;
@@ -343,17 +347,17 @@ struct page *ext4_encrypt(struct inode *

BUG_ON(!PageLocked(plaintext_page));

- ctx = ext4_get_crypto_ctx(inode);
+ ctx = ext4_get_crypto_ctx(inode, gfp_flags);
if (IS_ERR(ctx))
return (struct page *) ctx;

/* The encryption operation will require a bounce page. */
- ciphertext_page = alloc_bounce_page(ctx);
+ ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
if (IS_ERR(ciphertext_page))
goto errout;
ctx->w.control_page = plaintext_page;
err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
- plaintext_page, ciphertext_page);
+ plaintext_page, ciphertext_page, gfp_flags);
if (err) {
ciphertext_page = ERR_PTR(err);
errout:
@@ -381,8 +385,8 @@ int ext4_decrypt(struct page *page)
{
BUG_ON(!PageLocked(page));

- return ext4_page_crypto(page->mapping->host,
- EXT4_DECRYPT, page->index, page, page);
+ return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT,
+ page->index, page, page, GFP_NOFS);
}

int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
@@ -403,11 +407,11 @@ int ext4_encrypted_zeroout(struct inode

BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

- ctx = ext4_get_crypto_ctx(inode);
+ ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
return PTR_ERR(ctx);

- ciphertext_page = alloc_bounce_page(ctx);
+ ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
if (IS_ERR(ciphertext_page)) {
err = PTR_ERR(ciphertext_page);
goto errout;
@@ -415,11 +419,12 @@ int ext4_encrypted_zeroout(struct inode

while (len--) {
err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
- ZERO_PAGE(0), ciphertext_page);
+ ZERO_PAGE(0), ciphertext_page,
+ GFP_NOFS);
if (err)
goto errout;

- bio = bio_alloc(GFP_KERNEL, 1);
+ bio = bio_alloc(GFP_NOWAIT, 1);
if (!bio) {
err = -ENOMEM;
goto errout;
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2261,11 +2261,13 @@ extern struct kmem_cache *ext4_crypt_inf
bool ext4_valid_contents_enc_mode(uint32_t mode);
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
extern struct workqueue_struct *ext4_read_workqueue;
-struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode);
+struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
+ gfp_t gfp_flags);
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
void ext4_restore_control_page(struct page *data_page);
struct page *ext4_encrypt(struct inode *inode,
- struct page *plaintext_page);
+ struct page *plaintext_page,
+ gfp_t gfp_flags);
int ext4_decrypt(struct page *page);
int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex);
extern const struct dentry_operations ext4_encrypted_d_ops;
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/backing-dev.h>

#include "ext4_jbd2.h"
#include "xattr.h"
@@ -485,9 +486,20 @@ int ext4_bio_write_page(struct ext4_io_s

if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
nr_to_submit) {
- data_page = ext4_encrypt(inode, page);
+ gfp_t gfp_flags = GFP_NOFS;
+
+ retry_encrypt:
+ data_page = ext4_encrypt(inode, page, gfp_flags);
if (IS_ERR(data_page)) {
ret = PTR_ERR(data_page);
+ if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
+ if (io->io_bio) {
+ ext4_io_submit(io);
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ }
+ gfp_flags |= __GFP_NOFAIL;
+ goto retry_encrypt;
+ }
data_page = NULL;
goto out;
}
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -279,7 +279,7 @@ int ext4_mpage_readpages(struct address_

if (ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode)) {
- ctx = ext4_get_crypto_ctx(inode);
+ ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
goto set_error_page;
}
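
For reference, the allocation modes after this change, as read from the
hunks above:

 - ext4_bio_write_page() starts with GFP_NOFS and upgrades to
   GFP_NOFS | __GFP_NOFAIL only for data integrity writeback
   (wbc->sync_mode == WB_SYNC_ALL), after first submitting any bio it
   has already queued.
 - The read path (ext4_mpage_readpages()) and ext4_decrypt() pass
   GFP_NOFS, matching the previous behaviour.
 - ext4_encrypted_zeroout() keeps GFP_NOWAIT for the bounce page and
   switches bio_alloc() from GFP_KERNEL to GFP_NOWAIT, so that path now
   fails fast with -ENOMEM instead of blocking in the allocator.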