mirror of /home/gitosis/repositories/libev.git
*** empty log message ***
parent a242c41779
commit c799314f36
Changes (2 lines changed)
@@ -1,10 +1,12 @@
 Revision history for libev, a high-performance and full-featured event loop.

 TODO: revisit 59.x timer in the light of modern powersaving
+TODO: maybe use timerfd to detect time jumps on linux
 - linuxaio backend might have tried to cancel an iocb
   multiple times (was unable to trigger this).
 - io_cancel can return EINTR, deal with it. also, assume
   io_submit also returns EINTR.
 - fix some other minor bugs in linuxaio backend.
 - cleanup: replace expect_true/false and noinline by their
   libecb counterparts.
+- move syscall infrastructure from ev_linuxaio.c to ev.c.
Makefile.am

@@ -4,7 +4,8 @@ VERSION_INFO = 4:0:0

 EXTRA_DIST = LICENSE Changes libev.m4 autogen.sh \
              ev_vars.h ev_wrap.h \
-             ev_epoll.c ev_select.c ev_poll.c ev_kqueue.c ev_port.c ev_linuxaio.c ev_win32.c \
+             ev_epoll.c ev_select.c ev_poll.c ev_kqueue.c ev_port.c ev_linuxaio.c ev_iouring.c \
+             ev_win32.c \
              ev.3 ev.pod Symbols.ev Symbols.event

 man_MANS = ev.3
ev.c (154 lines changed)
@@ -449,11 +449,11 @@

 #if EV_USE_LINUXAIO
 # include <sys/syscall.h>
-# if !SYS_io_getevents || !EV_USE_EPOLL /* ev_linuxaio uses ev_poll.c:ev_epoll_create */
+# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
+#  define EV_NEED_SYSCALL 1
+# else
 #  undef EV_USE_LINUXAIO
 #  define EV_USE_LINUXAIO 0
-# else
-#  define EV_NEED_SYSCALL 1
 # endif
 #endif

@@ -464,7 +464,7 @@
 #  define SYS_io_uring_enter    426
 #  define SYS_io_uring_register 427
 # endif
-# if SYS_io_uring_setup
+# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
 #  define EV_NEED_SYSCALL 1
 # else
 #  undef EV_USE_IOURING

@@ -522,65 +522,6 @@ struct signalfd_siginfo

 /*****************************************************************************/

-#if EV_NEED_SYSCALL
-
-#include <sys/syscall.h>
-
-/*
- * define some syscall wrappers for common architectures
- * this is mostly for nice looks during debugging, not performance.
- * our syscalls return < 0, not == -1, on error. which is good
- * enough for linux aio.
- * TODO: arm is also common nowadays, maybe even mips and x86
- * TODO: after implementing this, it suddenly looks like overkill, but it's hard to remove...
- */
-#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
-  /* the costly errno access probably kills this for size optimisation */
-
-#define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5) \
-  ({ \
-      long res; \
-      register unsigned long r5 __asm__ ("r8" ); \
-      register unsigned long r4 __asm__ ("r10"); \
-      register unsigned long r3 __asm__ ("rdx"); \
-      register unsigned long r2 __asm__ ("rsi"); \
-      register unsigned long r1 __asm__ ("rdi"); \
-      if (narg >= 5) r5 = (unsigned long)(arg5); \
-      if (narg >= 4) r4 = (unsigned long)(arg4); \
-      if (narg >= 3) r3 = (unsigned long)(arg3); \
-      if (narg >= 2) r2 = (unsigned long)(arg2); \
-      if (narg >= 1) r1 = (unsigned long)(arg1); \
-      __asm__ __volatile__ ( \
-        "syscall\n\t" \
-        : "=a" (res) \
-        : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
-        : "cc", "r11", "cx", "memory"); \
-      errno = -res; \
-      res; \
-  })
-
-#endif
-
-#ifdef ev_syscall
-#define ev_syscall0(nr)                          ev_syscall (nr, 0, 0, 0, 0, 0, 0)
-#define ev_syscall1(nr,arg1)                     ev_syscall (nr, 1, arg1, 0, 0, 0, 0)
-#define ev_syscall2(nr,arg1,arg2)                ev_syscall (nr, 2, arg1, arg2, 0, 0, 0)
-#define ev_syscall3(nr,arg1,arg2,arg3)           ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0)
-#define ev_syscall4(nr,arg1,arg2,arg3,arg4)      ev_syscall (nr, 4, arg1, arg2, arg3, arg4, 0)
-#define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5)
-#else
-#define ev_syscall0(nr)                          syscall (nr)
-#define ev_syscall1(nr,arg1)                     syscall (nr, arg1)
-#define ev_syscall2(nr,arg1,arg2)                syscall (nr, arg1, arg2)
-#define ev_syscall3(nr,arg1,arg2,arg3)           syscall (nr, arg1, arg2, arg3)
-#define ev_syscall4(nr,arg1,arg2,arg3,arg4)      syscall (nr, arg1, arg2, arg3, arg4)
-#define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
-#endif
-
-#endif
-
 /*****************************************************************************/

 #if EV_VERIFY >= 3
 # define EV_FREQUENT_CHECK ev_verify (EV_A)
 #else

@@ -594,8 +535,16 @@ struct signalfd_siginfo
 #define MIN_INTERVAL  0.0001220703125 /* 1/2**13, good till 4000 */
 /*#define MIN_INTERVAL  0.00000095367431640625 /* 1/2**20, good till 2200 */

-#define MIN_TIMEJUMP  1. /* minimum timejump that gets detected (if monotonic clock available) */
-#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
+#define MIN_TIMEJUMP  1. /* minimum timejump that gets detected (if monotonic clock available) */
+#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
+
+/* find a portable timestamp that is "always" in the future but fits into time_t.
+ * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
+ * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
+#define EV_TSTAMP_HUGE \
+  (sizeof (time_t) >= 8     ? 10000000000000. \
+   : 0 < (time_t)4294967295 ? 4294967295. \
+   : 2147483647.)

 #define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
 #define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
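[editor's note: a minimal standalone sketch of what the EV_TS_SET macro above does, assuming ev_tstamp is a double, which is the libev default; not part of the patch]

  #include <stdio.h>
  #include <time.h>

  typedef double ev_tstamp;

  #define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)

  int
  main (void)
  {
    struct timespec ts;
    ev_tstamp t = 1.25;

    EV_TS_SET (ts, t); /* splits 1.25s into tv_sec = 1, tv_nsec = 250000000 */
    printf ("%ld s, %ld ns\n", (long)ts.tv_sec, (long)ts.tv_nsec);
    return 0;
  }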
@@ -1645,6 +1594,72 @@ ecb_binary32_to_binary16 (uint32_t x)
 # define inline_speed ecb_noinline static
 #endif

+/*****************************************************************************/
+/* raw syscall wrappers */
+
+#if EV_NEED_SYSCALL
+
+#include <sys/syscall.h>
+
+/*
+ * define some syscall wrappers for common architectures
+ * this is mostly for nice looks during debugging, not performance.
+ * our syscalls return < 0, not == -1, on error. which is good
+ * enough for linux aio.
+ * TODO: arm is also common nowadays, maybe even mips and x86
+ * TODO: after implementing this, it suddenly looks like overkill, but it's hard to remove...
+ */
+#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
+  /* the costly errno access probably kills this for size optimisation */
+
+#define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
+  ({ \
+      long res; \
+      register unsigned long r6 __asm__ ("r9" ); \
+      register unsigned long r5 __asm__ ("r8" ); \
+      register unsigned long r4 __asm__ ("r10"); \
+      register unsigned long r3 __asm__ ("rdx"); \
+      register unsigned long r2 __asm__ ("rsi"); \
+      register unsigned long r1 __asm__ ("rdi"); \
+      if (narg >= 6) r6 = (unsigned long)(arg6); \
+      if (narg >= 5) r5 = (unsigned long)(arg5); \
+      if (narg >= 4) r4 = (unsigned long)(arg4); \
+      if (narg >= 3) r3 = (unsigned long)(arg3); \
+      if (narg >= 2) r2 = (unsigned long)(arg2); \
+      if (narg >= 1) r1 = (unsigned long)(arg1); \
+      __asm__ __volatile__ ( \
+        "syscall\n\t" \
+        : "=a" (res) \
+        : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5), "r" (r6) \
+        : "cc", "r11", "cx", "memory"); \
+      errno = -res; \
+      res; \
+  })
+
+#endif
+
+#ifdef ev_syscall
+#define ev_syscall0(nr)                               ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
+#define ev_syscall1(nr,arg1)                          ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
+#define ev_syscall2(nr,arg1,arg2)                     ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
+#define ev_syscall3(nr,arg1,arg2,arg3)                ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
+#define ev_syscall4(nr,arg1,arg2,arg3,arg4)           ev_syscall (nr, 4, arg1, arg2, arg3, arg4, 0, 0)
+#define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5)      ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
+#define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5, arg6)
+#else
+#define ev_syscall0(nr)                               syscall (nr)
+#define ev_syscall1(nr,arg1)                          syscall (nr, arg1)
+#define ev_syscall2(nr,arg1,arg2)                     syscall (nr, arg1, arg2)
+#define ev_syscall3(nr,arg1,arg2,arg3)                syscall (nr, arg1, arg2, arg3)
+#define ev_syscall4(nr,arg1,arg2,arg3,arg4)           syscall (nr, arg1, arg2, arg3, arg4)
+#define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5)      syscall (nr, arg1, arg2, arg3, arg4, arg5)
+#define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5, arg6)
+#endif
+
+#endif
+
 /*****************************************************************************/

 #define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)

 #if EV_MINPRI == EV_MAXPRI
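[editor's note: on the fallback branch above the ev_syscallN macros expand to plain syscall(2), so a zero-argument wrapper call behaves exactly like the libc syscall; a minimal sketch, with SYS_getpid chosen only as a convenient standard example]

  #include <stdio.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  #define ev_syscall0(nr) syscall (nr)

  int
  main (void)
  {
    long pid = ev_syscall0 (SYS_getpid); /* equivalent to getpid () */
    printf ("pid %ld\n", pid);
    return 0;
  }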
@@ -1884,7 +1899,7 @@ typedef struct
   unsigned char events; /* the events watched for */
   unsigned char reify;  /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
   unsigned char emask;  /* some backends store the actual kernel mask in here */
-  unsigned char unused;
+  unsigned char eflags; /* flags field for use by backends */
 #if EV_USE_EPOLL
   unsigned int egen;    /* generation counter to counter epoll bugs */
 #endif

@@ -2917,6 +2932,13 @@ ev_embeddable_backends (void) EV_NOEXCEPT
   if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
     flags &= ~EVBACKEND_EPOLL;

+  /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
+
+  /* EVBACKEND_IOURING is practically embeddable, but the current implementation is not
+   * because our backend_fd is the epoll fd we need as fallback.
+   * if the kernel ever is fixed, this might change...
+   */
+
   return flags;
 }

ev.h (4 lines changed)
@@ -522,8 +522,8 @@ enum {
   EVBACKEND_KQUEUE   = 0x00000008U, /* bsd, broken on osx */
   EVBACKEND_DEVPOLL  = 0x00000010U, /* solaris 8 */ /* NYI */
   EVBACKEND_PORT     = 0x00000020U, /* solaris 10 */
-  EVBACKEND_LINUXAIO = 0x00000040U, /* linux AIO */
-  EVBACKEND_IOURING  = 0x00000080U, /* linux io_uring, TBD */
+  EVBACKEND_LINUXAIO = 0x00000040U, /* linux AIO, 4.19+ */
+  EVBACKEND_IOURING  = 0x00000080U, /* linux io_uring, 5.1+ */
   EVBACKEND_ALL      = 0x000000FFU, /* all known backends */
   EVBACKEND_MASK     = 0x0000FFFFU  /* all future backends */
 };
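[editor's note: since the io_uring backend must be requested explicitly (see the ev.pod change below), a caller would select it roughly like this; a sketch using the public libev API, with EVFLAG_AUTO as fallback]

  #include <stdio.h>
  #include <ev.h>

  int
  main (void)
  {
    struct ev_loop *loop = ev_loop_new (EVBACKEND_IOURING);

    if (!loop) /* kernel too old, or backend not compiled in */
      loop = ev_loop_new (EVFLAG_AUTO);

    printf ("backend: 0x%x\n", ev_backend (loop));
    ev_loop_destroy (loop);
    return 0;
  }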
ev.pod (15 lines changed)
@@ -1751,7 +1751,7 @@ reuse the same code path.

 =head3 The special problem of fork

-Some backends (epoll, kqueue, probably linuxaio) do not support C<fork ()>
+Some backends (epoll, kqueue, linuxaio, iouring) do not support C<fork ()>
 at all or exhibit useless behaviour. Libev fully supports fork, but needs
 to be told about it in the child if you want to continue to use it in the
 child.

@@ -4486,6 +4486,7 @@ in your include path (e.g. in libev/ when using -Ilibev):

    ev_poll.c      only when poll backend is enabled
    ev_epoll.c     only when the epoll backend is enabled
    ev_linuxaio.c  only when the linux aio backend is enabled
+   ev_iouring.c   only when the linux io_uring backend is enabled
    ev_kqueue.c    only when the kqueue backend is enabled
    ev_port.c      only when the solaris port backend is enabled

@@ -4688,10 +4689,16 @@ headers indicate GNU/Linux + Glibc 2.4 or newer, otherwise disabled.

 =item EV_USE_LINUXAIO

+If defined to be C<1>, libev will compile in support for the Linux aio
+backend (C<EV_USE_EPOLL> must also be enabled). If undefined, it will be
+enabled on linux, otherwise disabled.
+
+=item EV_USE_IOURING
+
 If defined to be C<1>, libev will compile in support for the Linux
-aio backend. Due to its current limitations it has to be requested
-explicitly. If undefined, it will be enabled on linux, otherwise
-disabled.
+io_uring backend (C<EV_USE_EPOLL> must also be enabled). Due to its
+current limitations it has to be requested explicitly. If undefined, it
+will be enabled on linux, otherwise disabled.

 =item EV_USE_KQUEUE
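[editor's note: for the single-file embedding style documented elsewhere in ev.pod, the two knobs above combine like this; a sketch, using the macro names defined by this patch]

  #define EV_USE_EPOLL   1  /* the io_uring backend needs epoll as fallback */
  #define EV_USE_IOURING 1  /* off by default, must be requested explicitly */
  #include "ev.c"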
ev_epoll.c

@@ -197,6 +197,8 @@ epoll_poll (EV_P_ ev_tstamp timeout)
          * above with the gencounter check (== our fd is not the event fd), and
          * partially here, when epoll_ctl returns an error (== a child has the fd
          * but we closed it).
+         * note: for events such as POLLHUP, where we can't know whether it refers
+         * to EV_READ or EV_WRITE, we might issue redundant EPOLL_CTL_MOD calls.
          */
         ev->events = (want & EV_READ  ? EPOLLIN  : 0)
                    | (want & EV_WRITE ? EPOLLOUT : 0);

@@ -282,8 +284,8 @@ epoll_destroy (EV_P)
   array_free (epoll_eperm, EMPTY);
 }

-inline_size
-void
+ecb_cold
+static void
 epoll_fork (EV_P)
 {
   close (backend_fd);

ev_iouring.c (new file)

@@ -0,0 +1,648 @@
/*
 * libev linux io_uring fd activity backend
 *
 * Copyright (c) 2019 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
 * tion, are permitted provided that the following conditions are met:
 *
 *   1.  Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *
 *   2.  Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Alternatively, the contents of this file may be used under the terms of
 * the GNU General Public License ("GPL") version 2 or any later version,
 * in which case the provisions of the GPL are applicable instead of
 * the above. If you wish to allow the use of your version of this file
 * only under the terms of the GPL and not to allow others to use your
 * version of this file under the BSD license, indicate your decision
 * by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL. If you do not delete the
 * provisions above, a recipient may use your version of this file under
 * either the BSD or the GPL.
 */

/*
 * general notes about linux io_uring:
 *
 * a) it's the best interface I have seen so far. on linux.
 * b) best is not necessarily very good.
 * c) it's better than the aio mess, doesn't suffer from the fork problems
 *    of linux aio or epoll and so on and so on. and you could do event stuff
 *    without any syscalls. what's not to like?
 * d) ok, it's vastly more complex, but that's ok, really.
 * e) why 3 mmaps instead of one? one would be more space-efficient,
 *    and I can't see what benefit three would have (other than being
 *    somehow resizable/relocatable, but that's apparently not possible).
 * f) hmm, it's practically undebuggable (gdb can't access the memory, and
 *    the bizarre way structure offsets are communicated makes it hard to
 *    just print the ring buffer heads, even *iff* the memory were visible
 *    in gdb). but then, that's also ok, really.
 * g) well, you cannot specify a timeout when waiting for events. no,
 *    seriously, the interface doesn't support a timeout. never seen _that_
 *    before. sure, you can use a timerfd, but that's another syscall
 *    you could have avoided. overall, this bizarre omission smells
 *    like a µ-optimisation by the io_uring author for his personal
 *    applications, to the detriment of everybody else who just wants
 *    an event loop. but, umm, ok, if that's all, it could be worse.
 * h) there is a hardcoded limit of 4096 outstanding events. okay,
 *    at least there is no arbitrary low system-wide limit...
 * i) unlike linux aio, you *can* register more than the limit
 *    of fd events, and the kernel will "gracefully" signal an
 *    overflow, after which you could destroy and recreate the kernel
 *    state, a bit bigger, or fall back to e.g. poll. that's not
 *    totally insane, but kind of questions the point of a high
 *    performance I/O framework when it doesn't really work
 *    under stress.
 * j) but, oh my! it has exactly the same bugs as the linux aio backend,
 *    where some undocumented poll combinations just fail.
 *    so we need epoll AGAIN as a fallback. AGAIN! epoll!! and of course,
 *    this is completely undocumented, have I mentioned this already?
 * k) overall, the *API* itself is, I dare to say, not a total trainwreck.
 *    the big issues with it are the bugs requiring epoll, which might
 *    or might not get fixed (do I hold my breath?).
 */

#include <sys/timerfd.h>
#include <sys/mman.h>
#include <poll.h>

#define IOURING_INIT_ENTRIES 32

/*****************************************************************************/
/* syscall wrapdadoop - this section has the raw api/abi definitions */

#include <linux/fs.h>
#include <linux/types.h>

/* mostly directly taken from the kernel or documentation */

struct io_uring_sqe
{
  __u8 opcode;
  __u8 flags;
  __u16 ioprio;
  __s32 fd;
  __u64 off;
  __u64 addr;
  __u32 len;
  union {
    __kernel_rwf_t rw_flags;
    __u32          fsync_flags;
    __u16          poll_events;
    __u32          sync_range_flags;
    __u32          msg_flags;
  };
  __u64 user_data;
  union {
    __u16 buf_index;
    __u64 __pad2[3];
  };
};

struct io_uring_cqe
{
  __u64 user_data;
  __s32 res;
  __u32 flags;
};

struct io_sqring_offsets
{
  __u32 head;
  __u32 tail;
  __u32 ring_mask;
  __u32 ring_entries;
  __u32 flags;
  __u32 dropped;
  __u32 array;
  __u32 resv1;
  __u64 resv2;
};

struct io_cqring_offsets
{
  __u32 head;
  __u32 tail;
  __u32 ring_mask;
  __u32 ring_entries;
  __u32 overflow;
  __u32 cqes;
  __u64 resv[2];
};

struct io_uring_params
{
  __u32 sq_entries;
  __u32 cq_entries;
  __u32 flags;
  __u32 sq_thread_cpu;
  __u32 sq_thread_idle;
  __u32 resv[5];
  struct io_sqring_offsets sq_off;
  struct io_cqring_offsets cq_off;
};

#define IORING_OP_POLL_ADD    6
#define IORING_OP_POLL_REMOVE 7

#define IORING_ENTER_GETEVENTS 0x01

#define IORING_OFF_SQ_RING 0x00000000ULL
#define IORING_OFF_CQ_RING 0x08000000ULL
#define IORING_OFF_SQES    0x10000000ULL

inline_size
int
evsys_io_uring_setup (unsigned entries, struct io_uring_params *params)
{
  return ev_syscall2 (SYS_io_uring_setup, entries, params);
}

inline_size
int
evsys_io_uring_enter (int fd, unsigned to_submit, unsigned min_complete, unsigned flags, const sigset_t *sig, size_t sigsz)
{
  return ev_syscall6 (SYS_io_uring_enter, fd, to_submit, min_complete, flags, sig, sigsz);
}
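[editor's note: a self-contained way to check whether the kernel provides this syscall at all; a sketch, not part of the patch. It assumes the fallback syscall number 425 for SYS_io_uring_setup from the ev.c hunk above, and that struct io_uring_params as defined above is 120 bytes]

  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>

  #ifndef SYS_io_uring_setup
  # define SYS_io_uring_setup 425
  #endif

  int
  have_io_uring (void)
  {
    char params[120]; /* sizeof (struct io_uring_params), zeroed for probing */
    int fd;

    memset (params, 0, sizeof (params));
    fd = syscall (SYS_io_uring_setup, 4, params);

    if (fd < 0)
      return 0; /* e.g. ENOSYS on kernels before 5.1 */

    close (fd);
    return 1;
  }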

/*****************************************************************************/
/* actual backend implementation */

/* we hope that volatile will make the compiler access these variables only once */
#define EV_SQ_VAR(name) *(volatile unsigned *)((char *)iouring_sq_ring + iouring_sq_ ## name)
#define EV_CQ_VAR(name) *(volatile unsigned *)((char *)iouring_cq_ring + iouring_cq_ ## name)

/* the index array */
#define EV_SQ_ARRAY ((unsigned *)((char *)iouring_sq_ring + iouring_sq_array))

/* the submit/completion queue entries */
#define EV_SQES ((struct io_uring_sqe *) iouring_sqes)
#define EV_CQES ((struct io_uring_cqe *)((char *)iouring_cq_ring + iouring_cq_cqes))

static
struct io_uring_sqe *
iouring_sqe_get (EV_P)
{
  unsigned tail = EV_SQ_VAR (tail);

  if (tail + 1 - EV_SQ_VAR (head) > EV_SQ_VAR (ring_entries))
    {
      /* queue full, flush */
      evsys_io_uring_enter (iouring_fd, iouring_to_submit, 0, 0, 0, 0);
      iouring_to_submit = 0;
    }

  assert (("libev: io_uring queue full after flush", tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries)));

  return EV_SQES + (tail & EV_SQ_VAR (ring_mask));
}

inline_size
void
iouring_sqe_submit (EV_P_ struct io_uring_sqe *sqe)
{
  unsigned idx = sqe - EV_SQES;

  EV_SQ_ARRAY [idx] = idx;
  ECB_MEMORY_FENCE_RELEASE;
  ++EV_SQ_VAR (tail);
  /*ECB_MEMORY_FENCE_RELEASE; /* for the time being we assume this is not needed */
  ++iouring_to_submit;
}

/*****************************************************************************/

/* when the timerfd expires we simply note the fact,
 * as the purpose of the timerfd is to wake us up, nothing else.
 * the next iteration should re-set it.
 */
static void
iouring_tfd_cb (EV_P_ struct ev_io *w, int revents)
{
  iouring_tfd_to = EV_TSTAMP_HUGE;
}

static void
iouring_epoll_cb (EV_P_ struct ev_io *w, int revents)
{
  epoll_poll (EV_A_ 0);
}

/* called for full and partial cleanup */
ecb_cold
static void
iouring_internal_destroy (EV_P)
{
  close (iouring_tfd);
  close (iouring_fd);

  if (iouring_sq_ring != MAP_FAILED) munmap (iouring_sq_ring, iouring_sq_ring_size);
  if (iouring_cq_ring != MAP_FAILED) munmap (iouring_cq_ring, iouring_cq_ring_size);
  if (iouring_sqes    != MAP_FAILED) munmap (iouring_sqes   , iouring_sqes_size   );

  if (ev_is_active (&iouring_epoll_w)) ev_ref (EV_A); ev_io_stop (EV_A_ &iouring_epoll_w);
  if (ev_is_active (&iouring_tfd_w  )) ev_ref (EV_A); ev_io_stop (EV_A_ &iouring_tfd_w  );
}

ecb_cold
static int
iouring_internal_init (EV_P)
{
  struct io_uring_params params = { 0 };

  iouring_to_submit = 0;

  iouring_tfd     = -1;
  iouring_sq_ring = MAP_FAILED;
  iouring_cq_ring = MAP_FAILED;
  iouring_sqes    = MAP_FAILED;

  for (;;)
    {
      iouring_fd = evsys_io_uring_setup (iouring_entries, &params);

      if (iouring_fd >= 0)
        break; /* yippie */

      if (errno != EINVAL)
        return -1; /* we failed */

      /* EINVAL: lots of possible reasons, but maybe
       * it is because we hit the unqueryable hardcoded size limit
       */

      /* we hit the limit already, give up */
      if (iouring_max_entries)
        return -1;

      /* first time we hit EINVAL? assume we hit the limit, so go back and retry */
      iouring_entries >>= 1;
      iouring_max_entries = iouring_entries;
    }

  iouring_sq_ring_size = params.sq_off.array + params.sq_entries * sizeof (unsigned);
  iouring_cq_ring_size = params.cq_off.cqes  + params.cq_entries * sizeof (struct io_uring_cqe);
  iouring_sqes_size    =                       params.sq_entries * sizeof (struct io_uring_sqe);

  iouring_sq_ring = mmap (0, iouring_sq_ring_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQ_RING);
  iouring_cq_ring = mmap (0, iouring_cq_ring_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_CQ_RING);
  iouring_sqes    = mmap (0, iouring_sqes_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQES);

  if (iouring_sq_ring == MAP_FAILED || iouring_cq_ring == MAP_FAILED || iouring_sqes == MAP_FAILED)
    return -1;

  iouring_sq_head         = params.sq_off.head;
  iouring_sq_tail         = params.sq_off.tail;
  iouring_sq_ring_mask    = params.sq_off.ring_mask;
  iouring_sq_ring_entries = params.sq_off.ring_entries;
  iouring_sq_flags        = params.sq_off.flags;
  iouring_sq_dropped      = params.sq_off.dropped;
  iouring_sq_array        = params.sq_off.array;

  iouring_cq_head         = params.cq_off.head;
  iouring_cq_tail         = params.cq_off.tail;
  iouring_cq_ring_mask    = params.cq_off.ring_mask;
  iouring_cq_ring_entries = params.cq_off.ring_entries;
  iouring_cq_overflow     = params.cq_off.overflow;
  iouring_cq_cqes         = params.cq_off.cqes;

  iouring_tfd = timerfd_create (CLOCK_MONOTONIC, TFD_CLOEXEC);

  if (iouring_tfd < 0)
    return iouring_tfd;

  iouring_tfd_to = EV_TSTAMP_HUGE;

  return 0;
}

ecb_cold
static void
iouring_fork (EV_P)
{
  iouring_internal_destroy (EV_A);

  while (iouring_internal_init (EV_A) < 0)
    ev_syserr ("(libev) io_uring_setup");

  /* forking epoll should also effectively unregister all fds from the backend */
  epoll_fork (EV_A);
  /* epoll_fork already did this. hopefully */
  /*fd_rearm_all (EV_A);*/

  ev_io_stop  (EV_A_ &iouring_epoll_w);
  ev_io_set   (EV_A_ &iouring_epoll_w, backend_fd, EV_READ);
  ev_io_start (EV_A_ &iouring_epoll_w);

  ev_io_stop  (EV_A_ &iouring_tfd_w);
  ev_io_set   (EV_A_ &iouring_tfd_w, iouring_tfd, EV_READ);
  ev_io_start (EV_A_ &iouring_tfd_w);
}

/*****************************************************************************/

static void
iouring_modify (EV_P_ int fd, int oev, int nev)
{
  fprintf (stderr, "modify %d (%d, %d) %d\n", fd, oev, nev, anfds [fd].eflags);//D
  if (ecb_expect_false (anfds [fd].eflags))
    {
      /* we handed this fd over to epoll, so undo this first */
      /* we do it manually because the optimisations on epoll_modify won't do us any good */
      epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0);
      anfds [fd].eflags = 0;
      oev = 0;
    }

  if (oev)
    {
      /* we assume the sqe's are all "properly" initialised */
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode    = IORING_OP_POLL_REMOVE;
      sqe->fd        = fd;
      sqe->user_data = -1;
      iouring_sqe_submit (EV_A_ sqe);
    }

  /* increment generation counter to avoid handling old events */
  ++anfds [fd].egen;

  if (nev)
    {
      struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
      sqe->opcode      = IORING_OP_POLL_ADD;
      sqe->fd          = fd;
      sqe->user_data   = (uint32_t)fd | ((__u64)anfds [fd].egen << 32);
      sqe->poll_events =
          (nev & EV_READ  ? POLLIN  : 0)
        | (nev & EV_WRITE ? POLLOUT : 0);
      iouring_sqe_submit (EV_A_ sqe);
    }
}

inline_size
void
iouring_tfd_update (EV_P_ ev_tstamp timeout)
{
  ev_tstamp tfd_to = mn_now + timeout;

  /* we assume there will be many iterations per timer change, so
   * we only re-set the timerfd when we have to because its expiry
   * is too late.
   */
  if (ecb_expect_false (tfd_to < iouring_tfd_to))
    {
       struct itimerspec its;

       iouring_tfd_to = tfd_to;
       EV_TS_SET (its.it_interval, 0.);
       EV_TS_SET (its.it_value, tfd_to);

       if (timerfd_settime (iouring_tfd, TFD_TIMER_ABSTIME, &its, 0) < 0)
         assert (("libev: iouring timerfd_settime failed", 0));
    }
}

inline_size
void
iouring_process_cqe (EV_P_ struct io_uring_cqe *cqe)
{
  int      fd  = cqe->user_data & 0xffffffffU;
  uint32_t gen = cqe->user_data >> 32;
  int      res = cqe->res;

  /* ignore fd removal events, if there are any. TODO: verify */
  if (cqe->user_data == (__u64)-1)
    abort ();//D

  assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax));

  /* documentation lies, of course. the result value is NOT like
   * normal syscalls, but like linux raw syscalls, i.e. negative
   * error numbers. fortunate, as otherwise there would be no way
   * to get error codes at all. still, why not document this?
   */

  /* ignore event if generation doesn't match */
  /* this should actually be very rare */
  if (ecb_expect_false ((uint32_t)anfds [fd].egen != gen))
    return;

  if (ecb_expect_false (res < 0))
    {
      if (res == -EINVAL)
        {
          /* we assume this error code means the fd/poll combination is buggy
           * and fall back to epoll.
           * this error code might also indicate a bug, but the kernel doesn't
           * distinguish between those two conditions, so... sigh...
           */

          epoll_modify (EV_A_ fd, 0, anfds [fd].events);
        }
      else if (res == -EBADF)
        {
          assert (("libev: event loop rejected bad fd", res != -EBADF));
          fd_kill (EV_A_ fd);
        }
      else
        {
          errno = -res;
          ev_syserr ("(libev) IORING_OP_POLL_ADD");
        }

      return;
    }

  fprintf (stderr, "fd %d event, rearm\n", fd);//D

  /* feed events, we do not expect or handle POLLNVAL */
  fd_event (
    EV_A_
    fd,
    (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
    | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
  );

  /* io_uring is oneshot, so we need to re-arm the fd next iteration */
  /* this also means we usually have to do at least one syscall per iteration */
  anfds [fd].events = 0;
  fd_change (EV_A_ fd, EV_ANFD_REIFY);
}
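[editor's note: the user_data encoding written by iouring_modify and decoded above packs the fd and the per-fd generation counter into one 64 bit value; a tiny standalone round-trip check, illustrative only]

  #include <assert.h>
  #include <stdint.h>

  int
  main (void)
  {
    int      fd   = 5;
    uint32_t egen = 42;
    uint64_t user_data = (uint32_t)fd | ((uint64_t)egen << 32);

    assert ((int)(user_data & 0xffffffffU) == fd);   /* low 32 bits: fd */
    assert ((uint32_t)(user_data >> 32)    == egen); /* high 32 bits: generation */
    return 0;
  }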
/* called when the event queue overflows */
ecb_cold
static void
iouring_overflow (EV_P)
{
  /* we have two options: resize the queue (by tearing down
   * everything and recreating it), or living with it
   * and polling.
   * we implement this by resizing the queue, and, if that fails,
   * we just recreate the state on every failure, which
   * kind of is a very inefficient poll.
   * one danger is, due to the bias toward lower fds,
   * we will only really get events for those, so
   * maybe we need a poll() fallback, after all.
   */
  /*EV_CQ_VAR (overflow) = 0;*/ /* need to do this if we keep the state and poll manually */

  fd_rearm_all (EV_A);

  /* we double the size until we hit the hard-to-probe maximum */
  if (!iouring_max_entries)
    {
      iouring_entries <<= 1;
      iouring_fork (EV_A);
    }
  else
    {
      /* we hit the kernel limit, we should fall back to something else.
       * we can either poll() a few times and hope for the best,
       * poll always, or switch to epoll.
       * since we use epoll anyways, go epoll.
       */

      iouring_internal_destroy (EV_A);

      /* this should make it so that on return, we don't call any uring functions */
      iouring_to_submit = 0;

      for (;;)
        {
          backend = epoll_init (EV_A_ 0);

          if (backend)
            break;

          ev_syserr ("(libev) iouring switch to epoll");
        }
    }
}

/* handle any events in the completion queue, return true if there were any */
static int
iouring_handle_cq (EV_P)
{
  unsigned head, tail, mask;

  head = EV_CQ_VAR (head);
  ECB_MEMORY_FENCE_ACQUIRE;
  tail = EV_CQ_VAR (tail);

  if (head == tail)
    return 0;

  /* it can only overflow if we have events, yes, yes? */
  if (ecb_expect_false (EV_CQ_VAR (overflow)))
    {
      iouring_overflow (EV_A);
      return 1;
    }

  mask = EV_CQ_VAR (ring_mask);

  do
    iouring_process_cqe (EV_A_ &EV_CQES [head++ & mask]);
  while (head != tail);

  EV_CQ_VAR (head) = head;
  ECB_MEMORY_FENCE_RELEASE;

  return 1;
}
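[editor's note: the head++ & mask indexing above relies on the ring size being a power of two; a minimal standalone illustration of the wraparound, not part of the patch]

  #include <assert.h>

  int
  main (void)
  {
    unsigned entries = 8, mask = entries - 1;
    unsigned head = 14; /* monotonically increasing, already past the ring size */

    assert ((head & mask) == 6);       /* 14 mod 8 */
    assert (((head + 3) & mask) == 1); /* wraps around without a reset */
    return 0;
  }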
static void
iouring_poll (EV_P_ ev_tstamp timeout)
{
  /* if we have events, no need for extra syscalls, but we might have to queue events */
  if (iouring_handle_cq (EV_A))
    timeout = 0.;
  else
    /* no events, so maybe wait for some */
    iouring_tfd_update (EV_A_ timeout);

  /* only enter the kernel if we have something to submit, or we need to wait */
  if (timeout || iouring_to_submit)
    {
      int res;

      EV_RELEASE_CB;

      res = evsys_io_uring_enter (iouring_fd, iouring_to_submit, 1,
                                  timeout ? IORING_ENTER_GETEVENTS : 0, 0, 0);
      iouring_to_submit = 0;

      EV_ACQUIRE_CB;

      if (ecb_expect_false (res < 0))
        if (errno == EINTR)
          /* ignore */;
        else
          ev_syserr ("(libev) iouring setup");
      else
        iouring_handle_cq (EV_A);
    }
}

inline_size
int
iouring_init (EV_P_ int flags)
{
  if (!epoll_init (EV_A_ 0))
    return 0;

  ev_io_init (&iouring_epoll_w, iouring_epoll_cb, backend_fd, EV_READ);
  ev_set_priority (&iouring_epoll_w, EV_MAXPRI);

  ev_io_init (&iouring_tfd_w, iouring_tfd_cb, iouring_tfd, EV_READ);
  ev_set_priority (&iouring_tfd_w, EV_MAXPRI);

  iouring_entries     = IOURING_INIT_ENTRIES;
  iouring_max_entries = 0;

  if (iouring_internal_init (EV_A) < 0)
    {
      iouring_internal_destroy (EV_A);
      return 0;
    }

  ev_io_start (EV_A_ &iouring_epoll_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */

  ev_io_start (EV_A_ &iouring_tfd_w);
  ev_unref (EV_A); /* watcher should not keep loop alive */

  backend_modify = iouring_modify;
  backend_poll   = iouring_poll;

  return EVBACKEND_IOURING;
}

inline_size
void
iouring_destroy (EV_P)
{
  iouring_internal_destroy (EV_A);
  epoll_destroy (EV_A);
}

ev_linuxaio.c

@@ -323,19 +323,18 @@ static int
 linuxaio_get_events_from_ring (EV_P)
 {
   struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;
+  unsigned head, tail;

   /* the kernel reads and writes both of these variables, */
   /* as a C extension, we assume that volatile use here */
   /* both makes reads atomic and once-only */
-  unsigned head = *(volatile unsigned *)&ring->head;
-  unsigned tail = *(volatile unsigned *)&ring->tail;
+  head = *(volatile unsigned *)&ring->head;
+  ECB_MEMORY_FENCE_ACQUIRE;
+  tail = *(volatile unsigned *)&ring->tail;

   if (head == tail)
     return 0;

-  /* make sure the events up to tail are visible */
-  ECB_MEMORY_FENCE_ACQUIRE;
-
   /* parse all available events, but only once, to avoid starvation */
   if (tail > head) /* normal case around */
     linuxaio_parse_events (EV_A_ ring->io_events + head, tail - head);

@@ -396,9 +395,7 @@ linuxaio_get_events (EV_P_ ev_tstamp timeout)

       EV_RELEASE_CB;

-      ts.tv_sec = (long)timeout;
-      ts.tv_nsec = (long)((timeout - ts.tv_sec) * 1e9);
-
+      EV_TS_SET (ts, timeout);
       res = evsys_io_getevents (linuxaio_ctx, 1, want, ioev, &ts);

       EV_ACQUIRE_CB;

@@ -494,11 +491,16 @@ linuxaio_poll (EV_P_ ev_tstamp timeout)
       ++linuxaio_iteration;
       if (linuxaio_io_setup (EV_A) < 0)
         {
+          /* TODO: rearm all and recreate epoll backend from scratch */
+          /* TODO: might be more prudent? */
+
           /* too bad, we can't get a new aio context, go 100% epoll */
           linuxaio_free_iocbp (EV_A);
           ev_io_stop (EV_A_ &linuxaio_epoll_w);
           ev_ref (EV_A);
           linuxaio_ctx = 0;
+
+          backend        = EVBACKEND_EPOLL;
           backend_modify = epoll_modify;
           backend_poll   = epoll_poll;
         }

@@ -517,7 +519,10 @@ linuxaio_poll (EV_P_ ev_tstamp timeout)
           else if (errno == EINTR) /* not seen in reality, not documented */
             res = 0; /* silently ignore and retry */
           else
-            ev_syserr ("(libev) linuxaio io_submit");
+            {
+              ev_syserr ("(libev) linuxaio io_submit");
+              res = 0;
+            }

           submitted += res;
         }

@@ -555,8 +560,8 @@ linuxaio_init (EV_P_ int flags)
   ev_io_start (EV_A_ &linuxaio_epoll_w);
   ev_unref (EV_A); /* watcher should not keep loop alive */

-  backend_modify = linuxaio_modify;
-  backend_poll   = linuxaio_poll;
+  backend_modify = linuxaio_modify;
+  backend_poll   = linuxaio_poll;

   linuxaio_iocbpmax = 0;
   linuxaio_iocbps = 0;

@@ -577,8 +582,8 @@ linuxaio_destroy (EV_P)
   evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */
 }

-inline_size
-void
+ecb_cold
+static void
 linuxaio_fork (EV_P)
 {
   /* this frees all iocbs, which is very heavy-handed */

@@ -592,12 +597,11 @@ linuxaio_fork (EV_P)

   /* forking epoll should also effectively unregister all fds from the backend */
   epoll_fork (EV_A);
+  /* epoll_fork already did this. hopefully */
+  /*fd_rearm_all (EV_A);*/

   ev_io_stop  (EV_A_ &linuxaio_epoll_w);
   ev_io_set   (EV_A_ &linuxaio_epoll_w, backend_fd, EV_READ);
   ev_io_start (EV_A_ &linuxaio_epoll_w);
-
-  /* epoll_fork already did this. hopefully */
-  /*fd_rearm_all (EV_A);*/
 }

ev_vars.h (30 lines changed)
@@ -118,6 +118,36 @@ VARx(int, linuxaio_submitmax)
 VARx(ev_io, linuxaio_epoll_w)
 #endif

+#if EV_USE_IOURING || EV_GENWRAP
+VARx(int, iouring_fd)
+VARx(unsigned, iouring_to_submit);
+VARx(int, iouring_entries)
+VARx(int, iouring_max_entries)
+VARx(void *, iouring_sq_ring)
+VARx(void *, iouring_cq_ring)
+VARx(void *, iouring_sqes)
+VARx(uint32_t, iouring_sq_ring_size)
+VARx(uint32_t, iouring_cq_ring_size)
+VARx(uint32_t, iouring_sqes_size)
+VARx(uint32_t, iouring_sq_head)
+VARx(uint32_t, iouring_sq_tail)
+VARx(uint32_t, iouring_sq_ring_mask)
+VARx(uint32_t, iouring_sq_ring_entries)
+VARx(uint32_t, iouring_sq_flags)
+VARx(uint32_t, iouring_sq_dropped)
+VARx(uint32_t, iouring_sq_array)
+VARx(uint32_t, iouring_cq_head)
+VARx(uint32_t, iouring_cq_tail)
+VARx(uint32_t, iouring_cq_ring_mask)
+VARx(uint32_t, iouring_cq_ring_entries)
+VARx(uint32_t, iouring_cq_overflow)
+VARx(uint32_t, iouring_cq_cqes)
+VARx(ev_tstamp, iouring_tfd_to)
+VARx(int, iouring_tfd)
+VARx(ev_io, iouring_tfd_w)
+VARx(ev_io, iouring_epoll_w)
+#endif
+
 #if EV_USE_KQUEUE || EV_GENWRAP
 VARx(pid_t, kqueue_fd_pid)
 VARx(struct kevent *, kqueue_changes)

ev_wrap.h (54 lines changed)
@@ -44,6 +44,33 @@
 #define invoke_cb ((loop)->invoke_cb)
 #define io_blocktime ((loop)->io_blocktime)
 #define iocp ((loop)->iocp)
+#define iouring_cq_cqes ((loop)->iouring_cq_cqes)
+#define iouring_cq_head ((loop)->iouring_cq_head)
+#define iouring_cq_overflow ((loop)->iouring_cq_overflow)
+#define iouring_cq_ring ((loop)->iouring_cq_ring)
+#define iouring_cq_ring_entries ((loop)->iouring_cq_ring_entries)
+#define iouring_cq_ring_mask ((loop)->iouring_cq_ring_mask)
+#define iouring_cq_ring_size ((loop)->iouring_cq_ring_size)
+#define iouring_cq_tail ((loop)->iouring_cq_tail)
+#define iouring_entries ((loop)->iouring_entries)
+#define iouring_epoll_w ((loop)->iouring_epoll_w)
+#define iouring_fd ((loop)->iouring_fd)
+#define iouring_max_entries ((loop)->iouring_max_entries)
+#define iouring_sq_array ((loop)->iouring_sq_array)
+#define iouring_sq_dropped ((loop)->iouring_sq_dropped)
+#define iouring_sq_flags ((loop)->iouring_sq_flags)
+#define iouring_sq_head ((loop)->iouring_sq_head)
+#define iouring_sq_ring ((loop)->iouring_sq_ring)
+#define iouring_sq_ring_entries ((loop)->iouring_sq_ring_entries)
+#define iouring_sq_ring_mask ((loop)->iouring_sq_ring_mask)
+#define iouring_sq_ring_size ((loop)->iouring_sq_ring_size)
+#define iouring_sq_tail ((loop)->iouring_sq_tail)
+#define iouring_sqes ((loop)->iouring_sqes)
+#define iouring_sqes_size ((loop)->iouring_sqes_size)
+#define iouring_tfd ((loop)->iouring_tfd)
+#define iouring_tfd_to ((loop)->iouring_tfd_to)
+#define iouring_tfd_w ((loop)->iouring_tfd_w)
+#define iouring_to_submit ((loop)->iouring_to_submit)
 #define kqueue_changecnt ((loop)->kqueue_changecnt)
 #define kqueue_changemax ((loop)->kqueue_changemax)
 #define kqueue_changes ((loop)->kqueue_changes)

@@ -151,6 +178,33 @@
 #undef invoke_cb
 #undef io_blocktime
 #undef iocp
+#undef iouring_cq_cqes
+#undef iouring_cq_head
+#undef iouring_cq_overflow
+#undef iouring_cq_ring
+#undef iouring_cq_ring_entries
+#undef iouring_cq_ring_mask
+#undef iouring_cq_ring_size
+#undef iouring_cq_tail
+#undef iouring_entries
+#undef iouring_epoll_w
+#undef iouring_fd
+#undef iouring_max_entries
+#undef iouring_sq_array
+#undef iouring_sq_dropped
+#undef iouring_sq_flags
+#undef iouring_sq_head
+#undef iouring_sq_ring
+#undef iouring_sq_ring_entries
+#undef iouring_sq_ring_mask
+#undef iouring_sq_ring_size
+#undef iouring_sq_tail
+#undef iouring_sqes
+#undef iouring_sqes_size
+#undef iouring_tfd
+#undef iouring_tfd_to
+#undef iouring_tfd_w
+#undef iouring_to_submit
 #undef kqueue_changecnt
 #undef kqueue_changemax
 #undef kqueue_changes
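[editor's note: what the ev_wrap.h machinery above buys inside ev.c, as a self-contained sketch with an illustrative struct (the real struct ev_loop members are generated from ev_vars.h): a plain member name compiles into an access through the loop pointer when EV_MULTIPLICITY is enabled]

  struct ev_loop { int iouring_fd; };

  #define iouring_fd ((loop)->iouring_fd)

  static void
  set_fd (struct ev_loop *loop, int fd)
  {
    iouring_fd = fd; /* expands to ((loop)->iouring_fd) = fd */
  }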