From e64f9a4af5a379f5a9fde59f0f944cd10ccfc6ea Mon Sep 17 00:00:00 2001
From: Werner Koch <wk@gnupg.org>
Date: Wed, 13 Jul 2016 15:33:41 +0200
Subject: Speedup closing fds before an exec.

* src/system-posix.c [__linux__]: Include dirent.h.
(get_max_fds) [__linux__]: Return the actual used highest fd.
--

This is the same code as introduced with the GnuPG commit
512c56af43027149e8beacf259746b8d7bf9b1a2

Signed-off-by: Werner Koch <wk@gnupg.org>
---
 src/system-posix.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/src/system-posix.c b/src/system-posix.c
index 8ca27e6..52376da 100644
--- a/src/system-posix.c
+++ b/src/system-posix.c
@@ -36,6 +36,9 @@
 # include <sys/time.h>
 # include <sys/resource.h>
 #endif /*HAVE_GETRLIMIT*/
+#if __linux__
+# include <dirent.h>
+#endif /*__linux__ */
 
 #include "assuan-defs.h"
@@ -180,6 +183,43 @@ get_max_fds (void)
 #ifdef HAVE_GETRLIMIT
   struct rlimit rl;
 
+  /* Under Linux we can figure out the highest used file descriptor by
+   * reading /proc/PID/fd.  This is in the common cases much faster
+   * than for example doing 4096 close calls where almost all of them
+   * will fail.  We use the same code in GnuPG and measured this: On a
+   * system with a limit of 4096 files and only 8 files open with the
+   * highest number being 10, we speedup close_all_fds from 125ms to
+   * 0.4ms including the readdir.
+   *
+   * Another option would be to close the file descriptors as returned
+   * from reading that directory - however then we need to snapshot
+   * that list before starting to close them.  */
+#ifdef __linux__
+  {
+    DIR *dir = NULL;
+    struct dirent *dir_entry;
+    const char *s;
+    int x;
+
+    dir = opendir ("/proc/self/fd");
+    if (dir)
+      {
+        while ((dir_entry = readdir (dir)))
+          {
+            s = dir_entry->d_name;
+            if ( *s < '0' || *s > '9')
+              continue;
+            x = atoi (s);
+            if (x > max_fds)
+              max_fds = x;
+          }
+        closedir (dir);
+      }
+    if (max_fds != -1)
+      return max_fds + 1;
+  }
+#endif /* __linux__ */
+
 # ifdef RLIMIT_NOFILE
   if (!getrlimit (RLIMIT_NOFILE, &rl))
     max_fds = rl.rlim_max;
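
For readers who want to try the same trick outside of libassuan, below is a minimal, self-contained sketch of the approach the patch describes: scan /proc/self/fd for the highest open descriptor and fall back to the RLIMIT_NOFILE ceiling when that scan is unavailable.  The function names my_get_max_fds and my_close_all_fds are hypothetical, chosen only for this example; they are not part of libassuan or GnuPG.

/* Sketch only: same idea as the patch, not the libassuan code itself. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <dirent.h>
#include <sys/time.h>
#include <sys/resource.h>

/* Return one more than the highest file descriptor believed to be in use. */
static int
my_get_max_fds (void)
{
  int max_fds = -1;

#ifdef __linux__
  DIR *dir = opendir ("/proc/self/fd");
  if (dir)
    {
      struct dirent *de;

      while ((de = readdir (dir)))
        {
          if (de->d_name[0] < '0' || de->d_name[0] > '9')
            continue;               /* Skip "." and "..".  */
          int x = atoi (de->d_name);
          if (x > max_fds)
            max_fds = x;
        }
      closedir (dir);
      if (max_fds != -1)
        return max_fds + 1;         /* Includes the fd opendir used.  */
    }
#endif /* __linux__ */

  /* Fallback: use the resource limit as an upper bound.  */
  struct rlimit rl;
  if (!getrlimit (RLIMIT_NOFILE, &rl) && rl.rlim_max != RLIM_INFINITY)
    return (int)rl.rlim_max;
  return 1024;                      /* Last-resort guess.  */
}

/* Close every descriptor above stderr, e.g. right before an exec.  */
static void
my_close_all_fds (void)
{
  int max = my_get_max_fds ();
  int fd;

  for (fd = 3; fd < max; fd++)
    close (fd);
}

int
main (void)
{
  printf ("closing descriptors below %d\n", my_get_max_fds ());
  my_close_all_fds ();
  return 0;
}

Note that opendir itself consumes a descriptor, so the value returned while the directory is open is a safe upper bound rather than an exact count; closing a few extra descriptors is harmless, and the loop stays tiny compared with blindly closing every fd up to the rlimit.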