From 18329a436691d2b5abc9867c5bcbfece42579935 Mon Sep 17 00:00:00 2001
From: Anatoly Burakov <anatoly.burakov@intel.com>
Date: Tue, 4 Sep 2018 16:15:44 +0100
Subject: [PATCH] mem: raise maximum fd limit unconditionally

Previously, when we allocated hugepages, we closed the fd's
corresponding to them after we had done our mappings. Since we did
mmap(), we didn't actually lose the reference, but file descriptors
used for mmap() do not count against the fd limit. Since we are going
to store all of our fd's, we will hit the fd limit much more often
when using smaller page sizes.

Fix this by raising the fd limit to maximum unconditionally.

Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/librte_eal/linuxapp/eal/eal_memory.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index dbf19499e5..dfb537f59e 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <sys/resource.h>
 #include
 #include
 #include
@@ -2204,6 +2205,25 @@ memseg_secondary_init(void)
 int
 rte_eal_memseg_init(void)
 {
+	/* increase rlimit to maximum */
+	struct rlimit lim;
+
+	if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
+		/* set limit to maximum */
+		lim.rlim_cur = lim.rlim_max;
+
+		if (setrlimit(RLIMIT_NOFILE, &lim) < 0) {
+			RTE_LOG(DEBUG, EAL, "Setting maximum number of open files failed: %s\n",
+					strerror(errno));
+		} else {
+			RTE_LOG(DEBUG, EAL, "Setting maximum number of open files to %"
+					PRIu64 "\n",
+					(uint64_t)lim.rlim_cur);
+		}
+	} else {
+		RTE_LOG(ERR, EAL, "Cannot get current resource limits\n");
+	}
+
 	return rte_eal_process_type() == RTE_PROC_PRIMARY ?
 #ifndef RTE_ARCH_64
 			memseg_primary_init_32() :
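
For reference, here is a minimal standalone sketch of the same getrlimit()/setrlimit()
pattern the patch adds to rte_eal_memseg_init(): it raises the RLIMIT_NOFILE soft limit
to the hard maximum and prints both values. It is illustrative only, not part of the
patch; it assumes a Linux/POSIX environment and uses no DPDK APIs.

/*
 * Illustrative example only: raise this process's open-file soft limit
 * (RLIMIT_NOFILE) to the hard maximum, as the patch does inside EAL.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit lim;

	if (getrlimit(RLIMIT_NOFILE, &lim) != 0) {
		fprintf(stderr, "getrlimit failed: %s\n", strerror(errno));
		return 1;
	}
	printf("soft limit: %llu, hard limit: %llu\n",
			(unsigned long long)lim.rlim_cur,
			(unsigned long long)lim.rlim_max);

	/* raise the soft limit to the hard maximum */
	lim.rlim_cur = lim.rlim_max;
	if (setrlimit(RLIMIT_NOFILE, &lim) != 0) {
		fprintf(stderr, "setrlimit failed: %s\n", strerror(errno));
		return 1;
	}
	printf("soft limit raised to %llu\n",
			(unsigned long long)lim.rlim_cur);
	return 0;
}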