[PATCH v5] sctp: fix refcount bug in sctp_wfree

From: Qiujun Huang
Date: Thu Mar 26 2020 - 21:19:23 EST


We should iterate over the datamsgs to move
all chunks (skbs) to newsk.

The following case causes the bug:
the problematic SKB was on the outq->transmitted list.

sctp_outq_sack
        sctp_check_transmitted
                SKB was moved to the outq->sacked list

then the sack queue was thrown away:

                SKB was deleted from outq->sacked
                (but it was still held by the datamsg from
                 sctp_datamsg_to_asoc, so sctp_wfree was not called here)

then the migration happened:

        sctp_for_each_tx_datachunk(sctp_clear_owner_w);
        sctp_assoc_migrate();
        sctp_for_each_tx_datachunk(sctp_set_owner_w);

        SKB was not on any outq list, so it was not changed to newsk

and finally:

__sctp_outq_teardown
        sctp_chunk_put (for another skb)
                sctp_datamsg_put
                        __kfree_skb(msg->frag_list)
                                sctp_wfree (for SKB)

SKB->sk was still oldsk (skb->sk != asoc->base.sk), hence the refcount
bug in sctp_wfree.
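
For clarity, the idea of the fix can be restated as a minimal sketch.
The helper name fix_datamsg_owner below is invented for illustration
only; the fields it touches (chunk->msg, msg->chunks, frag_list,
chunk->skb, asoc->base.sk) are the ones the patch below actually uses.
Whenever a chunk is found on one of the tx queues, the whole datamsg
owning it is walked, so chunks that were already removed from every
queue but are still held by the datamsg get their skb owner fixed too:

/* Sketch only, not part of the patch: process every chunk of the
 * datamsg owning 'chunk', not just 'chunk' itself, so skbs that are
 * no longer on any outq list are still switched to the right socket.
 */
static void fix_datamsg_owner(struct sctp_association *asoc, bool clear,
			      struct sctp_chunk *chunk,
			      void (*cb)(struct sctp_chunk *))
{
	struct sctp_datamsg *msg = chunk->msg;
	struct sctp_chunk *c;

	list_for_each_entry(c, &msg->chunks, frag_list)
		if ((clear && asoc->base.sk == c->skb->sk) ||
		    (!clear && asoc->base.sk != c->skb->sk))
			cb(c);
}

The patch below implements this per-datamsg walk as the local
traverse_and_process() macro.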

Reported-and-tested-by: syzbot+cea71eec5d6de256d54d@xxxxxxxxxxxxxxxxxxxxxxxxx
Signed-off-by: Qiujun Huang <hqjagain@xxxxxxxxx>
---
net/sctp/socket.c | 30 ++++++++++++++++++++++--------
1 file changed, 22 insertions(+), 8 deletions(-)

diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1b56fc440606..75acbd5d4597 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -147,29 +147,43 @@ static void sctp_clear_owner_w(struct sctp_chunk *chunk)
 	skb_orphan(chunk->skb);
 }
 
+#define traverse_and_process()	\
+do {				\
+	msg = chunk->msg;	\
+	if (msg == prev_msg)	\
+		continue;	\
+	list_for_each_entry(c, &msg->chunks, frag_list) {	\
+		if ((clear && asoc->base.sk == c->skb->sk) ||	\
+		    (!clear && asoc->base.sk != c->skb->sk))	\
+			cb(c);	\
+	}			\
+} while (0)
+
 static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
+				       bool clear,
 				       void (*cb)(struct sctp_chunk *))
 
 {
+	struct sctp_datamsg *msg, *prev_msg = NULL;
 	struct sctp_outq *q = &asoc->outqueue;
+	struct sctp_chunk *chunk, *c;
 	struct sctp_transport *t;
-	struct sctp_chunk *chunk;
 
 	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
 		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
-			cb(chunk);
+			traverse_and_process();
 
 	list_for_each_entry(chunk, &q->retransmit, transmitted_list)
-		cb(chunk);
+		traverse_and_process();
 
 	list_for_each_entry(chunk, &q->sacked, transmitted_list)
-		cb(chunk);
+		traverse_and_process();
 
 	list_for_each_entry(chunk, &q->abandoned, transmitted_list)
-		cb(chunk);
+		traverse_and_process();
 
 	list_for_each_entry(chunk, &q->out_chunk_list, list)
-		cb(chunk);
+		traverse_and_process();
 }
 
 static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
@@ -9574,9 +9588,9 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	 * paths won't try to lock it and then oldsk.
 	 */
 	lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
-	sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
+	sctp_for_each_tx_datachunk(assoc, true, sctp_clear_owner_w);
 	sctp_assoc_migrate(assoc, newsk);
-	sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
+	sctp_for_each_tx_datachunk(assoc, false, sctp_set_owner_w);
 
 	/* If the association on the newsk is already closed before accept()
 	 * is called, set RCV_SHUTDOWN flag.
--
2.17.1