[PATCH RESEND] fs/epoll: fix the edge-triggered mode for nested epoll

From: hev
Date: Mon Sep 02 2019 - 01:22:09 EST


From: Heiher <r@xxxxxx>

The structure of the nested epoll instances:

efd[1]: { efd[2] (EPOLLIN) }          efd[0]: { efd[2] (EPOLLIN | EPOLLET) }
             |                                            |
             +---------------------+----------------------+
                                   |
                                   v
                     efd[2]: { sfd[0] (EPOLLIN) }

When sfd[0] becomes readable:
* epoll_wait(efd[0], ..., 0) should return efd[2]'s events on the first call,
and 0 on subsequent calls, because efd[2] was added to efd[0] in
edge-triggered mode.
* epoll_wait(efd[1], ..., 0) should return efd[2]'s events on every call
until efd[2] is no longer readable (epoll_wait(efd[2], ...) => 0), because
efd[2] was added to efd[1] in level-triggered mode.
* epoll_wait(efd[2], ..., 0) should return sfd[0]'s events on every call
until sfd[0] is no longer readable (read(sfd[0], ...) => EAGAIN), because
sfd[0] was added to efd[2] in level-triggered mode.

Test code:
#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/socket.h>

int main(int argc, char *argv[])
{
	int sfd[2];
	int efd[3];
	int nfds;
	struct epoll_event e;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sfd) < 0)
		goto out;

	efd[0] = epoll_create(1);
	if (efd[0] < 0)
		goto out;

	efd[1] = epoll_create(1);
	if (efd[1] < 0)
		goto out;

	efd[2] = epoll_create(1);
	if (efd[2] < 0)
		goto out;

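	/* efd[2] watches sfd[0] in level-triggered mode */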
	e.events = EPOLLIN;
	if (epoll_ctl(efd[2], EPOLL_CTL_ADD, sfd[0], &e) < 0)
		goto out;

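	/* efd[1] watches efd[2] in level-triggered mode */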
	e.events = EPOLLIN;
	if (epoll_ctl(efd[1], EPOLL_CTL_ADD, efd[2], &e) < 0)
		goto out;

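	/* efd[0] watches efd[2] in edge-triggered mode */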
	e.events = EPOLLIN | EPOLLET;
	if (epoll_ctl(efd[0], EPOLL_CTL_ADD, efd[2], &e) < 0)
		goto out;

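	/* one pending byte makes sfd[0] readable */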
	if (write(sfd[1], "w", 1) != 1)
		goto out;

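	/* edge-triggered: efd[0] should report efd[2] only on the first call */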
	nfds = epoll_wait(efd[0], &e, 1, 0);
	if (nfds != 1)
		goto out;

	nfds = epoll_wait(efd[0], &e, 1, 0);
	if (nfds != 0)
		goto out;

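	/* level-triggered: efd[1] should report efd[2] on every call */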
	nfds = epoll_wait(efd[1], &e, 1, 0);
	if (nfds != 1)
		goto out;

	nfds = epoll_wait(efd[1], &e, 1, 0);
	if (nfds != 1)
		goto out;

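	/* level-triggered: efd[2] should report sfd[0] on every call */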
	nfds = epoll_wait(efd[2], &e, 1, 0);
	if (nfds != 1)
		goto out;

	nfds = epoll_wait(efd[2], &e, 1, 0);
	if (nfds != 1)
		goto out;

	close(efd[2]);
	close(efd[1]);
	close(efd[0]);
	close(sfd[0]);
	close(sfd[1]);

	printf("PASS\n");
	return 0;

out:
	printf("FAIL\n");
	return -1;
}
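
As an optional extension (not part of the original test), the level-triggered
behaviour described above could also be checked after the pending byte is
consumed: once sfd[0] has been drained, epoll_wait() on efd[2] and efd[1]
should both return 0. A sketch of such extra checks, which would go just
before the close() calls in the program above:

	char buf;

	/* drain the single byte written to sfd[1] */
	if (read(sfd[0], &buf, 1) != 1)
		goto out;

	/* with sfd[0] no longer readable, efd[2] should report no events */
	nfds = epoll_wait(efd[2], &e, 1, 0);
	if (nfds != 0)
		goto out;

	/* ... and efd[1] should no longer report efd[2] */
	nfds = epoll_wait(efd[1], &e, 1, 0);
	if (nfds != 0)
		goto out;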

Cc: Al Viro <viro@xxxxxxxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Davide Libenzi <davidel@xxxxxxxxxxxxxxx>
Cc: Davidlohr Bueso <dave@xxxxxxxxxxxx>
Cc: Dominik Brodowski <linux@xxxxxxxxxxxxxxxxxxxx>
Cc: Eric Wong <e@xxxxxxxxx>
Cc: Jason Baron <jbaron@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Roman Penyaev <rpenyaev@xxxxxxx>
Cc: Sridhar Samudrala <sridhar.samudrala@xxxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: linux-fsdevel@xxxxxxxxxxxxxxx
Signed-off-by: hev <r@xxxxxx>
---
fs/eventpoll.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index d7f1f5011fac..a44cb27c636c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -672,6 +672,7 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
 {
 	__poll_t res;
 	int pwake = 0;
+	int nwake = 0;
 	struct epitem *epi, *nepi;
 	LIST_HEAD(txlist);

@@ -685,6 +686,9 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
 	if (!ep_locked)
 		mutex_lock_nested(&ep->mtx, depth);
 
+	if (!depth || list_empty(&ep->rdllist))
+		nwake = 1;
+
 	/*
 	 * Steal the ready list, and re-init the original one to the
 	 * empty list. Also, set ep->ovflist to NULL so that events
@@ -739,7 +743,7 @@ static __poll_t ep_scan_ready_list(struct eventpoll *ep,
 	list_splice(&txlist, &ep->rdllist);
 	__pm_relax(ep->ws);
 
-	if (!list_empty(&ep->rdllist)) {
+	if (nwake && !list_empty(&ep->rdllist)) {
 		/*
 		 * Wake up (if active) both the eventpoll wait list and
 		 * the ->poll() wait list (delayed after we release the lock).
--
2.23.0