[PATCH 4.16 05/72] rds: tcp: must use spin_lock_irq* and not spin_lock_bh with rds_tcp_conn_lock

From: Greg Kroah-Hartman
Date: Mon May 14 2018 - 03:07:28 EST


4.16-stable review patch. If anyone has any objections, please let me know.

------------------

From: Sowmini Varadhan <sowmini.varadhan@xxxxxxxxxx>

commit 53d0e83f9329aa51dcc205b514dbee05cb4df309 upstream.

rds_tcp_connection allocation/free management has the potential to be
called from __rds_conn_create after IRQs have been disabled, so
spin_[un]lock_bh cannot be used with rds_tcp_conn_lock.

Bottom-halves that need to synchronize for critical sections protected
by rds_tcp_conn_lock should instead use rds_destroy_pending() correctly.
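
For background (not part of the patch itself): spin_unlock_bh() ends in
local_bh_enable(), which must not run with hard IRQs disabled, so the _bh
lock variants are unsafe on any path that can be reached after a caller has
already disabled interrupts. The _irqsave/_irqrestore pair is safe from any
context, and the plain _irq pair suffices where the caller is known to run
with IRQs enabled. A minimal sketch of that pattern, with hypothetical names
(struct foo, foo_lock, foo_list, foo_free(), foo_add()) rather than RDS code:

#include <linux/list.h>
#include <linux/spinlock.h>

struct foo {
	struct list_head node;
};

static DEFINE_SPINLOCK(foo_lock);
static LIST_HEAD(foo_list);

/* May be reached with IRQs already disabled by the caller, so the
 * flags-saving variant is the only safe choice here.
 */
static void foo_free(struct foo *f)
{
	unsigned long flags;

	spin_lock_irqsave(&foo_lock, flags);
	list_del(&f->node);
	spin_unlock_irqrestore(&foo_lock, flags);
}

/* Known to run in process context with IRQs enabled, so the cheaper
 * non-saving _irq variant is sufficient.
 */
static void foo_add(struct foo *f)
{
	spin_lock_irq(&foo_lock);
	list_add_tail(&f->node, &foo_list);
	spin_unlock_irq(&foo_lock);
}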

Reported-by: syzbot+c68e51bb5e699d3f8d91@xxxxxxxxxxxxxxxxxxxxxxxxx
Fixes: ebeeb1ad9b8a ("rds: tcp: use rds_destroy_pending() to synchronize
netns/module teardown and rds connection/workq management")
Signed-off-by: Sowmini Varadhan <sowmini.varadhan@xxxxxxxxxx>
Acked-by: Santosh Shilimkar <santosh.shilimkar@xxxxxxxxxx>
Signed-off-by: David S. Miller <davem@xxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
net/rds/tcp.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)

--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -275,13 +275,14 @@ static int rds_tcp_laddr_check(struct ne
static void rds_tcp_conn_free(void *arg)
{
struct rds_tcp_connection *tc = arg;
+ unsigned long flags;

rdsdebug("freeing tc %p\n", tc);

- spin_lock_bh(&rds_tcp_conn_lock);
+ spin_lock_irqsave(&rds_tcp_conn_lock, flags);
if (!tc->t_tcp_node_detached)
list_del(&tc->t_tcp_node);
- spin_unlock_bh(&rds_tcp_conn_lock);
+ spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);

kmem_cache_free(rds_tcp_conn_slab, tc);
}
@@ -311,13 +312,13 @@ static int rds_tcp_conn_alloc(struct rds
rdsdebug("rds_conn_path [%d] tc %p\n", i,
conn->c_path[i].cp_transport_data);
}
- spin_lock_bh(&rds_tcp_conn_lock);
+ spin_lock_irq(&rds_tcp_conn_lock);
for (i = 0; i < RDS_MPATH_WORKERS; i++) {
tc = conn->c_path[i].cp_transport_data;
tc->t_tcp_node_detached = false;
list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
}
- spin_unlock_bh(&rds_tcp_conn_lock);
+ spin_unlock_irq(&rds_tcp_conn_lock);
fail:
if (ret) {
for (j = 0; j < i; j++)
@@ -529,7 +530,7 @@ static void rds_tcp_kill_sock(struct net

rtn->rds_tcp_listen_sock = NULL;
rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
- spin_lock_bh(&rds_tcp_conn_lock);
+ spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);

@@ -542,7 +543,7 @@ static void rds_tcp_kill_sock(struct net
tc->t_tcp_node_detached = true;
}
}
- spin_unlock_bh(&rds_tcp_conn_lock);
+ spin_unlock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
rds_conn_destroy(tc->t_cpath->cp_conn);
}
@@ -590,7 +591,7 @@ static void rds_tcp_sysctl_reset(struct
{
struct rds_tcp_connection *tc, *_tc;

- spin_lock_bh(&rds_tcp_conn_lock);
+ spin_lock_irq(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);

@@ -600,7 +601,7 @@ static void rds_tcp_sysctl_reset(struct
/* reconnect with new parameters */
rds_conn_path_drop(tc->t_cpath, false);
}
- spin_unlock_bh(&rds_tcp_conn_lock);
+ spin_unlock_irq(&rds_tcp_conn_lock);
}

static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,