*** empty log message ***

This commit is contained in:
Marc Alexander Lehmann 2019-06-20 22:44:59 +00:00
parent 9c2c08ed07
commit 0ce071c104
10 changed files with 494 additions and 83 deletions

Changes

@ -1,7 +1,10 @@
Revision history for libev, a high-performance and full-featured event loop.
- new experimental linux aio backend (linux 4.18+).
- removed redundant 0-ptr check in ev_once.
- updated/extended ev_set_allocator documentation.
- replaced EMPTY2 macro by array_needsize_noinit.
- minor code cleanups.
4.25 Fri Dec 21 07:49:20 CET 2018
- INCOMPATIBLE CHANGE: EV_THROW was renamed to EV_NOEXCEPT

ev.3 (82 lines changed)

@ -1,4 +1,4 @@
.\" Automatically generated by Pod::Man 2.28 (Pod::Simple 3.29)
.\" Automatically generated by Pod::Man 4.11 (Pod::Simple 3.35)
.\"
.\" Standard preamble:
.\" ========================================================================
@ -46,7 +46,7 @@
.ie \n(.g .ds Aq \(aq
.el .ds Aq '
.\"
.\" If the F register is turned on, we'll generate index entries on stderr for
.\" If the F register is >0, we'll generate index entries on stderr for
.\" titles (.TH), headers (.SH), subsections (.SS), items (.Ip), and index
.\" entries marked with X<> in POD. Of course, you'll have to process the
.\" output yourself in some meaningful fashion.
@ -56,12 +56,12 @@
..
.nr rF 0
.if \n(.g .if rF .nr rF 1
.if (\n(rF:(\n(.g==0)) \{
. if \nF \{
.if (\n(rF:(\n(.g==0)) \{\
. if \nF \{\
. de IX
. tm Index:\\$1\t\\n%\t"\\$2"
..
. if !\nF==2 \{
. if !\nF==2 \{\
. nr % 0
. nr F 2
. \}
@ -133,7 +133,7 @@
.\" ========================================================================
.\"
.IX Title "LIBEV 3"
.TH LIBEV 3 "2018-12-21" "libev-4.25" "libev - high performance full featured event loop"
.TH LIBEV 3 "2019-06-20" "libev-4.25" "libev - high performance full featured event loop"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@ -394,13 +394,35 @@ You could override this function in high-availability programs to, say,
free some memory if it cannot allocate memory, to use a special allocator,
or even to sleep a while and retry until some memory is available.
.Sp
Example: Replace the libev allocator with one that waits a bit and then
retries (example requires a standards-compliant \f(CW\*(C`realloc\*(C'\fR).
Example: The following is the \f(CW\*(C`realloc\*(C'\fR function that libev itself uses
which should work with \f(CW\*(C`realloc\*(C'\fR and \f(CW\*(C`free\*(C'\fR functions of all kinds and
is probably a good basis for your own implementation.
.Sp
.Vb 6
.Vb 5
\& static void *
\& ev_realloc_emul (void *ptr, long size) EV_NOEXCEPT
\& {
\& if (size)
\& return realloc (ptr, size);
\&
\& free (ptr);
\& return 0;
\& }
.Ve
.Sp
Example: Replace the libev allocator with one that waits a bit and then
retries.
.Sp
.Vb 8
\& static void *
\& persistent_realloc (void *ptr, size_t size)
\& {
\& if (!size)
\& {
\& free (ptr);
\& return 0;
\& }
\&
\& for (;;)
\& {
\& void *newptr = realloc (ptr, size);
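The hunk above is cut off at the realloc call by the diff context; a minimal sketch of how such a wait-and-retry allocator might continue (the 60-second interval, the includes and the trailing install note are assumptions, not part of this diff):

    #include <stdlib.h>   /* realloc, free */
    #include <unistd.h>   /* sleep */

    static void *
    persistent_realloc (void *ptr, size_t size)
    {
      if (!size)
        {
          free (ptr);     /* size 0 means "free", as in ev_realloc_emul above */
          return 0;
        }

      for (;;)
        {
          void *newptr = realloc (ptr, size);

          if (newptr)     /* success: hand the block back to libev */
            return newptr;

          sleep (60);     /* wait a bit, then retry, as the text describes */
        }
    }

    /* installed with ev_set_allocator (persistent_realloc); the callback type
       must match the ev_set_allocator prototype in the ev.h actually in use */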
@ -587,7 +609,7 @@ This flag's behaviour will become the default in future versions of libev.
.ie n .IP """EVBACKEND_SELECT"" (value 1, portable select backend)" 4
.el .IP "\f(CWEVBACKEND_SELECT\fR (value 1, portable select backend)" 4
.IX Item "EVBACKEND_SELECT (value 1, portable select backend)"
This is your standard \fIselect\fR\|(2) backend. Not \fIcompletely\fR standard, as
This is your standard \fBselect\fR\|(2) backend. Not \fIcompletely\fR standard, as
libev tries to roll its own fd_set with no limits on the number of fds,
but if that fails, expect a fairly low limit on the number of fds when
using this backend. It doesn't scale too well (O(highest_fd)), but its
@ -606,7 +628,7 @@ This backend maps \f(CW\*(C`EV_READ\*(C'\fR to the \f(CW\*(C`readfds\*(C'\fR set
.ie n .IP """EVBACKEND_POLL"" (value 2, poll backend, available everywhere except on windows)" 4
.el .IP "\f(CWEVBACKEND_POLL\fR (value 2, poll backend, available everywhere except on windows)" 4
.IX Item "EVBACKEND_POLL (value 2, poll backend, available everywhere except on windows)"
And this is your standard \fIpoll\fR\|(2) backend. It's more complicated
And this is your standard \fBpoll\fR\|(2) backend. It's more complicated
than select, but handles sparse fds better and has no artificial
limit on the number of fds you can use (except it will slow down
considerably with a lot of inactive fds). It scales similarly to select,
@ -618,7 +640,7 @@ This backend maps \f(CW\*(C`EV_READ\*(C'\fR to \f(CW\*(C`POLLIN | POLLERR | POLL
.ie n .IP """EVBACKEND_EPOLL"" (value 4, Linux)" 4
.el .IP "\f(CWEVBACKEND_EPOLL\fR (value 4, Linux)" 4
.IX Item "EVBACKEND_EPOLL (value 4, Linux)"
Use the linux-specific \fIepoll\fR\|(7) interface (for both pre\- and post\-2.6.9
Use the linux-specific \fBepoll\fR\|(7) interface (for both pre\- and post\-2.6.9
kernels).
.Sp
For few fds, this backend is a bit little slower than poll and select, but
@ -1367,7 +1389,7 @@ bug in your program.
Libev will usually signal a few \*(L"dummy\*(R" events together with an error, for
example it might indicate that a fd is readable or writable, and if your
callbacks is well-written it can just attempt the operation and cope with
the error from \fIread()\fR or \fIwrite()\fR. This will not work in multi-threaded
the error from \fBread()\fR or \fBwrite()\fR. This will not work in multi-threaded
programs, though, as the fd could already be closed and reused for another
thing, so beware.
.SS "\s-1GENERIC WATCHER FUNCTIONS\s0"
@ -1813,13 +1835,13 @@ sent a \s-1SIGPIPE,\s0 which, by default, aborts your program. For most programs
this is sensible behaviour, for daemons, this is usually undesirable.
.PP
So when you encounter spurious, unexplained daemon exits, make sure you
ignore \s-1SIGPIPE \s0(and maybe make sure you log the exit status of your daemon
ignore \s-1SIGPIPE\s0 (and maybe make sure you log the exit status of your daemon
somewhere, as that would have given you a big clue).
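A one-line illustration of the advice above; where and how to ignore the signal is up to the application (this snippet is a sketch, not part of the diff):

    #include <signal.h>

    int
    main (void)
    {
      /* daemons usually ignore SIGPIPE so that writes to closed sockets or
         pipes fail with EPIPE instead of killing the process */
      signal (SIGPIPE, SIG_IGN);

      /* ... set up the libev loop and watchers here ... */
      return 0;
    }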
.PP
\fIThe special problem of \fIaccept()\fIing when you can't\fR
\fIThe special problem of \f(BIaccept()\fIing when you can't\fR
.IX Subsection "The special problem of accept()ing when you can't"
.PP
Many implementations of the \s-1POSIX \s0\f(CW\*(C`accept\*(C'\fR function (for example,
Many implementations of the \s-1POSIX\s0 \f(CW\*(C`accept\*(C'\fR function (for example,
found in post\-2004 Linux) have the peculiar behaviour of not removing a
connection from the pending queue in all error cases.
.PP
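As a sketch of the kind of tolerant accept handling this paragraph leads up to (names and the error policy are illustrative, not taken from the diff):

    #include <errno.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include "ev.h"

    /* accept everything that is pending and shrug off errors, so a connection
       that keeps failing cannot wedge the event loop */
    static void
    accept_cb (EV_P_ ev_io *w, int revents)
    {
      for (;;)
        {
          int fd = accept (w->fd, 0, 0);

          if (fd >= 0)
            close (fd); /* placeholder: hand the new connection to the application */
          else if (errno == EAGAIN || errno == EWOULDBLOCK)
            break;      /* pending queue drained, wait for the next EV_READ */
          else
            break;      /* some error: ignore it and retry on the next event
                           instead of aborting the daemon */
        }
    }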
@ -2436,7 +2458,7 @@ ignored. Instead, each time the periodic watcher gets scheduled, the
reschedule callback will be called with the watcher as first, and the
current time as second argument.
.Sp
\&\s-1NOTE: \s0\fIThis callback \s-1MUST NOT\s0 stop or destroy any periodic watcher, ever,
\&\s-1NOTE:\s0 \fIThis callback \s-1MUST NOT\s0 stop or destroy any periodic watcher, ever,
or make \s-1ANY\s0 other event loop modifications whatsoever, unless explicitly
allowed by documentation here\fR.
.Sp
@ -2460,7 +2482,7 @@ It must return the next time to trigger, based on the passed time value
will usually be called just before the callback will be triggered, but
might be called at other times, too.
.Sp
\&\s-1NOTE: \s0\fIThis callback must always return a time that is higher than or
\&\s-1NOTE:\s0 \fIThis callback must always return a time that is higher than or
equal to the passed \f(CI\*(C`now\*(C'\fI value\fR.
.Sp
This can be used to create very complex timers, such as a timer that
@ -2615,7 +2637,7 @@ to install a fork handler with \f(CW\*(C`pthread_atfork\*(C'\fR that resets it.
catch fork calls done by libraries (such as the libc) as well.
.PP
In current versions of libev, the signal will not be blocked indefinitely
unless you use the \f(CW\*(C`signalfd\*(C'\fR \s-1API \s0(\f(CW\*(C`EV_SIGNALFD\*(C'\fR). While this reduces
unless you use the \f(CW\*(C`signalfd\*(C'\fR \s-1API\s0 (\f(CW\*(C`EV_SIGNALFD\*(C'\fR). While this reduces
the window of opportunity for problems, it will not go away, as libev
\&\fIhas\fR to modify the signal mask, at least temporarily.
.PP
@ -4520,7 +4542,7 @@ configuration (no autoconf):
.PP
This will automatically include \fIev.h\fR, too, and should be done in a
single C source file only to provide the function implementations. To use
it, do the same for \fIev.h\fR in all files wishing to use this \s-1API \s0(best
it, do the same for \fIev.h\fR in all files wishing to use this \s-1API\s0 (best
done by writing a wrapper around \fIev.h\fR that you can include instead and
where you can put other configuration options):
.PP
@ -4603,7 +4625,7 @@ to redefine them before including \fIev.h\fR without breaking compatibility
to a compiled library. All other symbols change the \s-1ABI,\s0 which means all
users of libev and the libev code itself must be compiled with compatible
settings.
.IP "\s-1EV_COMPAT3 \s0(h)" 4
.IP "\s-1EV_COMPAT3\s0 (h)" 4
.IX Item "EV_COMPAT3 (h)"
Backwards compatibility is a major concern for libev. This is why this
release of libev comes with wrappers for the functions and symbols that
@ -4618,7 +4640,7 @@ typedef in that case.
In some future version, the default for \f(CW\*(C`EV_COMPAT3\*(C'\fR will become \f(CW0\fR,
and in some even more future version the compatibility code will be
removed completely.
.IP "\s-1EV_STANDALONE \s0(h)" 4
.IP "\s-1EV_STANDALONE\s0 (h)" 4
.IX Item "EV_STANDALONE (h)"
Must always be \f(CW1\fR if you do not use autoconf configuration, which
keeps libev from including \fIconfig.h\fR, and it also defines dummy
@ -4786,21 +4808,21 @@ watchers.
.Sp
In the absence of this define, libev will use \f(CW\*(C`sig_atomic_t volatile\*(C'\fR
(from \fIsignal.h\fR), which is usually good enough on most platforms.
.IP "\s-1EV_H \s0(h)" 4
.IP "\s-1EV_H\s0 (h)" 4
.IX Item "EV_H (h)"
The name of the \fIev.h\fR header file used to include it. The default if
undefined is \f(CW"ev.h"\fR in \fIevent.h\fR, \fIev.c\fR and \fIev++.h\fR. This can be
used to virtually rename the \fIev.h\fR header file in case of conflicts.
.IP "\s-1EV_CONFIG_H \s0(h)" 4
.IP "\s-1EV_CONFIG_H\s0 (h)" 4
.IX Item "EV_CONFIG_H (h)"
If \f(CW\*(C`EV_STANDALONE\*(C'\fR isn't \f(CW1\fR, this variable can be used to override
\&\fIev.c\fR's idea of where to find the \fIconfig.h\fR file, similarly to
\&\f(CW\*(C`EV_H\*(C'\fR, above.
.IP "\s-1EV_EVENT_H \s0(h)" 4
.IP "\s-1EV_EVENT_H\s0 (h)" 4
.IX Item "EV_EVENT_H (h)"
Similarly to \f(CW\*(C`EV_H\*(C'\fR, this macro can be used to override \fIevent.c\fR's idea
of how the \fIevent.h\fR header can be found, the default is \f(CW"event.h"\fR.
.IP "\s-1EV_PROTOTYPES \s0(h)" 4
.IP "\s-1EV_PROTOTYPES\s0 (h)" 4
.IX Item "EV_PROTOTYPES (h)"
If defined to be \f(CW0\fR, then \fIev.h\fR will not define any function
prototypes, but still define all the structs and other symbols. This is
@ -5019,10 +5041,10 @@ For example, the perl \s-1EV\s0 module uses something like this:
\& SV *self; /* contains this struct */ \e
\& SV *cb_sv, *fh /* note no trailing ";" */
.Ve
.IP "\s-1EV_CB_DECLARE \s0(type)" 4
.IP "\s-1EV_CB_DECLARE\s0 (type)" 4
.IX Item "EV_CB_DECLARE (type)"
.PD 0
.IP "\s-1EV_CB_INVOKE \s0(watcher, revents)" 4
.IP "\s-1EV_CB_INVOKE\s0 (watcher, revents)" 4
.IX Item "EV_CB_INVOKE (watcher, revents)"
.IP "ev_set_cb (ev, cb)" 4
.IX Item "ev_set_cb (ev, cb)"
@ -5035,7 +5057,7 @@ avoid the \f(CW\*(C`struct ev_loop *\*(C'\fR as first argument in all cases, or
method calls instead of plain function calls in \*(C+.
.SS "\s-1EXPORTED API SYMBOLS\s0"
.IX Subsection "EXPORTED API SYMBOLS"
If you need to re-export the \s-1API \s0(e.g. via a \s-1DLL\s0) and you need a list of
If you need to re-export the \s-1API\s0 (e.g. via a \s-1DLL\s0) and you need a list of
exported symbols, you can use the provided \fISymbol.*\fR files which list
all public symbols, one per line:
.PP
@ -5277,7 +5299,7 @@ a loop.
.IX Subsection "select is buggy"
.PP
All that's left is \f(CW\*(C`select\*(C'\fR, and of course Apple found a way to fuck this
one up as well: On \s-1OS/X, \s0\f(CW\*(C`select\*(C'\fR actively limits the number of file
one up as well: On \s-1OS/X,\s0 \f(CW\*(C`select\*(C'\fR actively limits the number of file
descriptors you can pass in to 1024 \- your program suddenly crashes when
you use more.
.PP

ev.c (96 lines changed)

@ -1,7 +1,7 @@
/*
* libev event processing core, watcher management
*
* Copyright (c) 2007-2018 Marc Alexander Lehmann <libev@schmorp.de>
* Copyright (c) 2007-2019 Marc Alexander Lehmann <libev@schmorp.de>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modifica-
@ -317,6 +317,10 @@
# define EV_USE_PORT 0
#endif
#ifndef EV_USE_LINUXAIO
# define EV_USE_LINUXAIO 0
#endif
#ifndef EV_USE_INOTIFY
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
# define EV_USE_INOTIFY EV_FEATURE_OS
@ -383,6 +387,10 @@
# define EV_USE_POLL 0
#endif
#if EV_USE_LINUXAIO
# include <linux/aio_abi.h> /* probably only needed for aio_context_t */
#endif
/* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */
/* which makes programs even slower. might work on other unices, too. */
#if EV_USE_CLOCK_SYSCALL
@ -1545,8 +1553,7 @@ ecb_binary32_to_binary16 (uint32_t x)
# define ABSPRI(w) (((W)w)->priority - EV_MINPRI)
#endif
#define EMPTY /* required for microsofts broken pseudo-c compiler */
#define EMPTY2(a,b) /* used to suppress some warnings */
#define EMPTY /* required for microsofts broken pseudo-c compiler */
typedef ev_watcher *W;
typedef ev_watcher_list *WL;
@ -1772,7 +1779,7 @@ typedef struct
WL head;
unsigned char events; /* the events watched for */
unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
unsigned char emask; /* the epoll backend stores the actual kernel mask in here */
unsigned char emask; /* some backends store the actual kernel mask in here */
unsigned char unused;
#if EV_USE_EPOLL
unsigned int egen; /* generation counter to counter epoll bugs */
@ -1963,7 +1970,9 @@ array_realloc (int elem, void *base, int *cur, int cnt)
return ev_realloc (base, elem * *cur);
}
#define array_init_zero(base,count) \
#define array_needsize_noinit(base,count)
#define array_needsize_zerofill(base,count) \
memset ((void *)(base), 0, sizeof (*(base)) * (count))
#define array_needsize(type,base,cur,cnt,init) \
@ -2009,7 +2018,7 @@ ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT
else
{
w_->pending = ++pendingcnt [pri];
array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, EMPTY2);
array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, array_needsize_noinit);
pendings [pri][w_->pending - 1].w = w_;
pendings [pri][w_->pending - 1].events = revents;
}
@ -2020,7 +2029,7 @@ ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT
inline_speed void
feed_reverse (EV_P_ W w)
{
array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, EMPTY2);
array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, array_needsize_noinit);
rfeeds [rfeedcnt++] = w;
}
@ -2148,7 +2157,7 @@ fd_change (EV_P_ int fd, int flags)
if (expect_true (!reify))
{
++fdchangecnt;
array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2);
array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit);
fdchanges [fdchangecnt - 1] = fd;
}
}
@ -2705,6 +2714,9 @@ childcb (EV_P_ ev_signal *sw, int revents)
#if EV_USE_KQUEUE
# include "ev_kqueue.c"
#endif
#if EV_USE_LINUXAIO
# include "ev_linuxaio.c"
#endif
#if EV_USE_EPOLL
# include "ev_epoll.c"
#endif
@ -2745,11 +2757,12 @@ ev_supported_backends (void) EV_NOEXCEPT
{
unsigned int flags = 0;
if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;
if (EV_USE_PORT ) flags |= EVBACKEND_PORT;
if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO;
if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
return flags;
}
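For context, a small usage sketch (not part of the diff) showing how an application could check whether the new backend was compiled in:

    #include <stdio.h>
    #include "ev.h"

    int
    main (void)
    {
      /* EVBACKEND_LINUXAIO is the bit added by this commit */
      if (ev_supported_backends () & EVBACKEND_LINUXAIO)
        puts ("libev was built with the experimental linux aio backend");
      else
        puts ("linux aio backend not compiled in");

      return 0;
    }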
@ -2918,22 +2931,25 @@ loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
flags |= ev_recommended_backends ();
#if EV_USE_IOCP
if (!backend && (flags & EVBACKEND_IOCP )) backend = iocp_init (EV_A_ flags);
if (!backend && (flags & EVBACKEND_IOCP )) backend = iocp_init (EV_A_ flags);
#endif
#if EV_USE_PORT
if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
if (!backend && (flags & EVBACKEND_PORT )) backend = port_init (EV_A_ flags);
#endif
#if EV_USE_KQUEUE
if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init (EV_A_ flags);
if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags);
#endif
#if EV_USE_LINUXAIO
if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags);
#endif
#if EV_USE_EPOLL
if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags);
if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init (EV_A_ flags);
#endif
#if EV_USE_POLL
if (!backend && (flags & EVBACKEND_POLL )) backend = poll_init (EV_A_ flags);
if (!backend && (flags & EVBACKEND_POLL )) backend = poll_init (EV_A_ flags);
#endif
#if EV_USE_SELECT
if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags);
if (!backend && (flags & EVBACKEND_SELECT )) backend = select_init (EV_A_ flags);
#endif
ev_prepare_init (&pending_w, pendingcb);
@ -2998,22 +3014,25 @@ ev_loop_destroy (EV_P)
close (backend_fd);
#if EV_USE_IOCP
if (backend == EVBACKEND_IOCP ) iocp_destroy (EV_A);
if (backend == EVBACKEND_IOCP ) iocp_destroy (EV_A);
#endif
#if EV_USE_PORT
if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
if (backend == EVBACKEND_PORT ) port_destroy (EV_A);
#endif
#if EV_USE_KQUEUE
if (backend == EVBACKEND_KQUEUE) kqueue_destroy (EV_A);
if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A);
#endif
#if EV_USE_LINUXAIO
if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A);
#endif
#if EV_USE_EPOLL
if (backend == EVBACKEND_EPOLL ) epoll_destroy (EV_A);
if (backend == EVBACKEND_EPOLL ) epoll_destroy (EV_A);
#endif
#if EV_USE_POLL
if (backend == EVBACKEND_POLL ) poll_destroy (EV_A);
if (backend == EVBACKEND_POLL ) poll_destroy (EV_A);
#endif
#if EV_USE_SELECT
if (backend == EVBACKEND_SELECT) select_destroy (EV_A);
if (backend == EVBACKEND_SELECT ) select_destroy (EV_A);
#endif
for (i = NUMPRI; i--; )
@ -3065,13 +3084,16 @@ inline_size void
loop_fork (EV_P)
{
#if EV_USE_PORT
if (backend == EVBACKEND_PORT ) port_fork (EV_A);
if (backend == EVBACKEND_PORT ) port_fork (EV_A);
#endif
#if EV_USE_KQUEUE
if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A);
if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A);
#endif
#if EV_USE_LINUXAIO
if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A);
#endif
#if EV_USE_EPOLL
if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A);
if (backend == EVBACKEND_EPOLL ) epoll_fork (EV_A);
#endif
#if EV_USE_INOTIFY
infy_fork (EV_A);
@ -3878,7 +3900,7 @@ ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
EV_FREQUENT_CHECK;
ev_start (EV_A_ (W)w, 1);
array_needsize (ANFD, anfds, anfdmax, fd + 1, array_init_zero);
array_needsize (ANFD, anfds, anfdmax, fd + 1, array_needsize_zerofill);
wlist_add (&anfds[fd].head, (WL)w);
/* common bug, apparently */
@ -3925,7 +3947,7 @@ ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
++timercnt;
ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1);
array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2);
array_needsize (ANHE, timers, timermax, ev_active (w) + 1, array_needsize_noinit);
ANHE_w (timers [ev_active (w)]) = (WT)w;
ANHE_at_cache (timers [ev_active (w)]);
upheap (timers, ev_active (w));
@ -4022,7 +4044,7 @@ ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
++periodiccnt;
ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1);
array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2);
array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, array_needsize_noinit);
ANHE_w (periodics [ev_active (w)]) = (WT)w;
ANHE_at_cache (periodics [ev_active (w)]);
upheap (periodics, ev_active (w));
@ -4617,7 +4639,7 @@ ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT
++idleall;
ev_start (EV_A_ (W)w, active);
array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, EMPTY2);
array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, array_needsize_noinit);
idles [ABSPRI (w)][active - 1] = w;
}
@ -4657,7 +4679,7 @@ ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT
EV_FREQUENT_CHECK;
ev_start (EV_A_ (W)w, ++preparecnt);
array_needsize (ev_prepare *, prepares, preparemax, preparecnt, EMPTY2);
array_needsize (ev_prepare *, prepares, preparemax, preparecnt, array_needsize_noinit);
prepares [preparecnt - 1] = w;
EV_FREQUENT_CHECK;
@ -4695,7 +4717,7 @@ ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT
EV_FREQUENT_CHECK;
ev_start (EV_A_ (W)w, ++checkcnt);
array_needsize (ev_check *, checks, checkmax, checkcnt, EMPTY2);
array_needsize (ev_check *, checks, checkmax, checkcnt, array_needsize_noinit);
checks [checkcnt - 1] = w;
EV_FREQUENT_CHECK;
@ -4843,7 +4865,7 @@ ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT
EV_FREQUENT_CHECK;
ev_start (EV_A_ (W)w, ++forkcnt);
array_needsize (ev_fork *, forks, forkmax, forkcnt, EMPTY2);
array_needsize (ev_fork *, forks, forkmax, forkcnt, array_needsize_noinit);
forks [forkcnt - 1] = w;
EV_FREQUENT_CHECK;
@ -4881,7 +4903,7 @@ ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT
EV_FREQUENT_CHECK;
ev_start (EV_A_ (W)w, ++cleanupcnt);
array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt, EMPTY2);
array_needsize (ev_cleanup *, cleanups, cleanupmax, cleanupcnt, array_needsize_noinit);
cleanups [cleanupcnt - 1] = w;
/* cleanup watchers should never keep a refcount on the loop */
@ -4926,7 +4948,7 @@ ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT
EV_FREQUENT_CHECK;
ev_start (EV_A_ (W)w, ++asynccnt);
array_needsize (ev_async *, asyncs, asyncmax, asynccnt, EMPTY2);
array_needsize (ev_async *, asyncs, asyncmax, asynccnt, array_needsize_noinit);
asyncs [asynccnt - 1] = w;
EV_FREQUENT_CHECK;

ev.h (19 lines changed)

@ -1,7 +1,7 @@
/*
* libev native API header
*
* Copyright (c) 2007-2018 Marc Alexander Lehmann <libev@schmorp.de>
* Copyright (c) 2007-2019 Marc Alexander Lehmann <libev@schmorp.de>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modifica-
@ -516,14 +516,15 @@ enum {
/* method bits to be ored together */
enum {
EVBACKEND_SELECT = 0x00000001U, /* available just about anywhere */
EVBACKEND_POLL = 0x00000002U, /* !win, !aix, broken on osx */
EVBACKEND_EPOLL = 0x00000004U, /* linux */
EVBACKEND_KQUEUE = 0x00000008U, /* bsd, broken on osx */
EVBACKEND_DEVPOLL = 0x00000010U, /* solaris 8 */ /* NYI */
EVBACKEND_PORT = 0x00000020U, /* solaris 10 */
EVBACKEND_ALL = 0x0000003FU, /* all known backends */
EVBACKEND_MASK = 0x0000FFFFU /* all future backends */
EVBACKEND_SELECT = 0x00000001U, /* available just about anywhere */
EVBACKEND_POLL = 0x00000002U, /* !win, !aix, broken on osx */
EVBACKEND_EPOLL = 0x00000004U, /* linux */
EVBACKEND_KQUEUE = 0x00000008U, /* bsd, broken on osx */
EVBACKEND_DEVPOLL = 0x00000010U, /* solaris 8 */ /* NYI */
EVBACKEND_PORT = 0x00000020U, /* solaris 10 */
EVBACKEND_LINUXAIO = 0x00000040U, /* Linux AIO */
EVBACKEND_ALL = 0x0000007FU, /* all known backends */
EVBACKEND_MASK = 0x0000FFFFU /* all future backends */
};
#if EV_PROTOTYPES
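A hedged usage sketch (not from the diff): requesting the new backend explicitly when creating a loop, and falling back to libev's own choice if the kernel or build does not support it:

    #include <stdio.h>
    #include "ev.h"

    int
    main (void)
    {
      /* ev_loop_new returns 0 if none of the requested backends could be
         initialised (e.g. linux aio on a pre-4.18 kernel) */
      struct ev_loop *loop = ev_loop_new (EVBACKEND_LINUXAIO);

      if (!loop)
        loop = ev_loop_new (EVFLAG_AUTO); /* fall back to the default selection */

      if (!loop)
        return 1;

      printf ("backend in use: 0x%x\n", ev_backend (loop));

      ev_loop_destroy (loop);
      return 0;
    }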

ev_epoll.c

@ -1,7 +1,7 @@
/*
* libev epoll fd activity backend
*
* Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
* Copyright (c) 2007,2008,2009,2010,2011,2016,2017,2019 Marc Alexander Lehmann <libev@schmorp.de>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modifica-
@ -124,7 +124,7 @@ epoll_modify (EV_P_ int fd, int oev, int nev)
/* add fd to epoll_eperms, if not already inside */
if (!(oldmask & EV_EMASK_EPERM))
{
array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, EMPTY2);
array_needsize (int, epoll_eperms, epoll_epermmax, epoll_epermcnt + 1, array_needsize_noinit);
epoll_eperms [epoll_epermcnt++] = fd;
}

ev_kqueue.c

@ -1,7 +1,7 @@
/*
* libev kqueue backend
*
* Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann <libev@schmorp.de>
* Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2016,2019 Marc Alexander Lehmann <libev@schmorp.de>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modifica-
@ -48,7 +48,7 @@ void
kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
{
++kqueue_changecnt;
array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2);
array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, array_needsize_noinit);
EV_SET (&kqueue_changes [kqueue_changecnt - 1], fd, filter, flags, fflags, 0, 0);
}

ev_linuxaio.c (new file, 342 lines)

@ -0,0 +1,342 @@
/*
* libev linux aio fd activity backend
*
* Copyright (c) 2019 Marc Alexander Lehmann <libev@schmorp.de>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modifica-
* tion, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
* CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
* ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Alternatively, the contents of this file may be used under the terms of
* the GNU General Public License ("GPL") version 2 or any later version,
* in which case the provisions of the GPL are applicable instead of
* the above. If you wish to allow the use of your version of this file
* only under the terms of the GPL and not to allow others to use your
* version of this file under the BSD license, indicate your decision
* by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL. If you do not delete the
* provisions above, a recipient may use your version of this file under
* either the BSD or the GPL.
*/
#include <sys/time.h> /* actually linux/time.h, but we must assume they are compatible */
#include <linux/aio_abi.h>
/* we try to fill 4kB pages exactly.
* the ring buffer header is 32 bytes, every io event is 32 bytes.
* the kernel takes the io event number, doubles it, adds 2, adds the ring buffer
* so the calculation below will use "exactly" 8kB for the ring buffer
*/
#define EV_LINUXAIO_DEPTH (256 / 2 - 2 - 1) /* max. number of io events per batch */
/*****************************************************************************/
/* syscall wrapdadoop */
#include <sys/syscall.h> /* no glibc wrappers */
/* aio_abi.h is not versioned in any way, so we cannot test for its existence */
#define IOCB_CMD_POLL 5
/* taken from linux/fs/aio.c */
#define AIO_RING_MAGIC 0xa10a10a1
#define AIO_RING_INCOMPAT_FEATURES 0
struct aio_ring
{
unsigned id; /* kernel internal index number */
unsigned nr; /* number of io_events */
unsigned head; /* Written to by userland or by kernel. */
unsigned tail;
unsigned magic;
unsigned compat_features;
unsigned incompat_features;
unsigned header_length; /* size of aio_ring */
struct io_event io_events[0];
};
static int
ev_io_setup (unsigned nr_events, aio_context_t *ctx_idp)
{
return syscall (SYS_io_setup, nr_events, ctx_idp);
}
static int
ev_io_destroy (aio_context_t ctx_id)
{
return syscall (SYS_io_destroy, ctx_id);
}
static int
ev_io_submit (aio_context_t ctx_id, long nr, struct iocb *cbp[])
{
return syscall (SYS_io_submit, ctx_id, nr, cbp);
}
static int
ev_io_cancel (aio_context_t ctx_id, struct iocb *cbp, struct io_event *result)
{
return syscall (SYS_io_cancel, ctx_id, cbp, result);
}
static int
ev_io_getevents (aio_context_t ctx_id, long min_nr, long nr, struct io_event *events, struct timespec *timeout)
{
return syscall (SYS_io_getevents, ctx_id, min_nr, nr, events, timeout);
}
typedef void (*ev_io_cb) (long nr, struct io_event *events);
/*****************************************************************************/
/* actual backend implementation */
/* two iocbs for every fd, one for read, one for write */
typedef struct aniocb
{
struct iocb io;
/*int inuse;*/
} *ANIOCBP;
inline_size
void
linuxaio_array_needsize_iocbp (ANIOCBP *base, int count)
{
/* TODO: quite the overhead to allocate every iocb separately */
while (count--)
{
*base = (ANIOCBP)ev_malloc (sizeof (**base));
memset (*base, 0, sizeof (**base));
/* would be nice to initialize fd/data as well */
(*base)->io.aio_lio_opcode = IOCB_CMD_POLL;
++base;
}
}
static void
linuxaio_free_iocbp (EV_P)
{
while (linuxaio_iocbpmax--)
ev_free (linuxaio_iocbps [linuxaio_iocbpmax]);
linuxaio_iocbpmax = 0;
}
static void
linuxaio_modify (EV_P_ int fd, int oev, int nev)
{
/* TODO: full zero initialize required? */
array_needsize (ANIOCBP, linuxaio_iocbps, linuxaio_iocbpmax, fd + 1, linuxaio_array_needsize_iocbp);
struct aniocb *iocb = linuxaio_iocbps [fd];
if (iocb->io.aio_buf)
ev_io_cancel (linuxaio_ctx, &iocb->io, (void *)0);
if (nev)
{
iocb->io.aio_data = fd;
iocb->io.aio_fildes = fd;
iocb->io.aio_buf =
(nev & EV_READ ? POLLIN : 0)
| (nev & EV_WRITE ? POLLOUT : 0);
/* queue iocb up for io_submit */
/* this assumes we only ever get one call per fd per loop iteration */
++linuxaio_submitcnt;
array_needsize (struct iocb *, linuxaio_submits, linuxaio_submitmax, linuxaio_submitcnt, array_needsize_noinit);
linuxaio_submits [linuxaio_submitcnt - 1] = &iocb->io;
}
}
static void
linuxaio_parse_events (EV_P_ struct io_event *ev, int nr)
{
while (nr)
{
int fd = ev->data;
int res = ev->res;
assert (("libev: iocb fd must be in-bounds", fd >= 0 && fd < anfdxmax));
/* linux aio is oneshot: rearm fd */
linuxaio_iocbps [fd]->io.aio_buf = 0;
anfds [fd].events = 0;
fd_change (EV_A_ fd, 0);
/* feed events, we do not expect or handle POLLNVAL */
if (ecb_expect_false (res & POLLNVAL))
fd_kill (EV_A_ fd);
else
fd_event (
EV_A_
fd,
(res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
| (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
);
--nr;
++ev;
}
}
/* get any events from ringbuffer, return true if any were handled */
static int
linuxaio_get_events_from_ring (EV_P)
{
struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;
ECB_MEMORY_FENCE_ACQUIRE;
unsigned head = ring->head;
unsigned tail = *(volatile unsigned *)&ring->tail;
if (ring->magic != AIO_RING_MAGIC
|| ring->incompat_features != AIO_RING_INCOMPAT_FEATURES
|| ring->header_length != sizeof (struct aio_ring) /* TODO: or use it to find io_event[0]? */
|| head == tail)
return 0;
/* parse all available events, but only once, to avoid starvation */
if (tail > head) /* normal case, no wrap-around */
linuxaio_parse_events (EV_A_ ring->io_events + head, tail - head);
else
{
/* wrapped around */
linuxaio_parse_events (EV_A_ ring->io_events + head, ring->nr - head);
linuxaio_parse_events (EV_A_ ring->io_events, tail);
}
ring->head = tail;
return 1;
}
/* read at least one event from kernel, or timeout */
inline_size
void
linuxaio_get_events (EV_P_ ev_tstamp timeout)
{
struct timespec ts;
struct io_event ioev;
int res;
if (linuxaio_get_events_from_ring (EV_A))
return;
/* no events, so wait for at least one, then poll ring buffer again */
/* this degrades to one event per loop iteration */
/* if the ring buffer changes layout, but so be it */
ts.tv_sec = (long)timeout;
ts.tv_nsec = (long)((timeout - ts.tv_sec) * 1e9);
res = ev_io_getevents (linuxaio_ctx, 1, 1, &ioev, &ts);
if (res < 0)
ev_syserr ("(libev) io_getevents");
else if (res)
{
/* at least one event received, handle it and any remaining ones in the ring buffer */
linuxaio_parse_events (EV_A_ &ioev, 1);
linuxaio_get_events_from_ring (EV_A);
}
}
static void
linuxaio_poll (EV_P_ ev_tstamp timeout)
{
int submitted;
/* first phase: submit new iocbs */
/* io_submit might return less than the requested number of iocbs */
/* this is, afaics, only because of errors, but we go by the book and use a loop, */
/* which allows us to pinpoint the erroneous iocb */
for (submitted = 0; submitted < linuxaio_submitcnt; )
{
int res = ev_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted);
if (res < 0)
if (errno == EAGAIN)
{
/* This happens when the ring buffer is full, at least. I assume this means
* that the event was queued synchronously during io_submit, and thus
* the buffer overflowed.
* In this case, we just try next loop iteration.
*/
memcpy (linuxaio_submits, linuxaio_submits + submitted, (linuxaio_submitcnt - submitted) * sizeof (*linuxaio_submits));
linuxaio_submitcnt -= submitted;
timeout = 0;
break;
}
else
/* TODO: we get EAGAIN when the ring buffer is full for some reason */
/* TODO: should we always just try next time? */
ev_syserr ("(libev) io_submit");
submitted += res;
}
linuxaio_submitcnt = 0;
/* second phase: fetch and parse events */
linuxaio_get_events (EV_A_ timeout);
}
inline_size
int
linuxaio_init (EV_P_ int flags)
{
/* would be great to have a nice test for IOCB_CMD_POLL instead */
if (ev_linux_version () < 0x041200) /* 4.18 introduced IOCB_CMD_POLL */
return 0;
if (ev_io_setup (EV_LINUXAIO_DEPTH, &linuxaio_ctx) < 0)
return 0;
backend_modify = linuxaio_modify;
backend_poll = linuxaio_poll;
linuxaio_iocbpmax = 0;
linuxaio_iocbps = 0;
linuxaio_submits = 0;
linuxaio_submitmax = 0;
linuxaio_submitcnt = 0;
return EVBACKEND_LINUXAIO;
}
inline_size
void
linuxaio_destroy (EV_P)
{
linuxaio_free_iocbp (EV_A);
ev_io_destroy (linuxaio_ctx);
}
inline_size
void
linuxaio_fork (EV_P)
{
abort ();//D
}
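For embedders, a compile-time sketch (file name illustrative; EV_STANDALONE and EV_USE_LINUXAIO are real knobs shown elsewhere in this commit): the new ev_linuxaio.c is pulled in by ev.c once the backend is enabled:

    /* myev.c -- single-file embedding of libev with the new backend enabled */
    #define EV_STANDALONE   1   /* do not include config.h */
    #define EV_USE_LINUXAIO 1   /* compile in ev_linuxaio.c (needs linux 4.18+ at runtime) */
    #include "ev.c"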

ev_poll.c

@ -1,7 +1,7 @@
/*
* libev poll fd activity backend
*
* Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
* Copyright (c) 2007,2008,2009,2010,2011,2016,2019 Marc Alexander Lehmann <libev@schmorp.de>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modifica-
@ -64,7 +64,7 @@ poll_modify (EV_P_ int fd, int oev, int nev)
if (idx < 0) /* need to allocate a new pollfd */
{
pollidxs [fd] = idx = pollcnt++;
array_needsize (struct pollfd, polls, pollmax, pollcnt, EMPTY2);
array_needsize (struct pollfd, polls, pollmax, pollcnt, array_needsize_noinit);
polls [idx].fd = fd;
}

ev_vars.h

@ -1,7 +1,7 @@
/*
* loop member variable declarations
*
* Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann <libev@schmorp.de>
* Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2019 Marc Alexander Lehmann <libev@schmorp.de>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modifica-
@ -107,6 +107,15 @@ VARx(int, epoll_epermcnt)
VARx(int, epoll_epermmax)
#endif
#if EV_USE_LINUXAIO || EV_GENWRAP
VARx(aio_context_t, linuxaio_ctx)
VARx(struct aniocb **, linuxaio_iocbps)
VARx(int, linuxaio_iocbpmax)
VARx(struct iocb **, linuxaio_submits)
VARx(int, linuxaio_submitcnt)
VARx(int, linuxaio_submitmax)
#endif
#if EV_USE_KQUEUE || EV_GENWRAP
VARx(pid_t, kqueue_fd_pid)
VARx(struct kevent *, kqueue_changes)

ev_wrap.h

@ -50,6 +50,12 @@
#define kqueue_eventmax ((loop)->kqueue_eventmax)
#define kqueue_events ((loop)->kqueue_events)
#define kqueue_fd_pid ((loop)->kqueue_fd_pid)
#define linuxaio_ctx ((loop)->linuxaio_ctx)
#define linuxaio_iocbpmax ((loop)->linuxaio_iocbpmax)
#define linuxaio_iocbps ((loop)->linuxaio_iocbps)
#define linuxaio_submitcnt ((loop)->linuxaio_submitcnt)
#define linuxaio_submitmax ((loop)->linuxaio_submitmax)
#define linuxaio_submits ((loop)->linuxaio_submits)
#define loop_count ((loop)->loop_count)
#define loop_depth ((loop)->loop_depth)
#define loop_done ((loop)->loop_done)
@ -149,6 +155,12 @@
#undef kqueue_eventmax
#undef kqueue_events
#undef kqueue_fd_pid
#undef linuxaio_ctx
#undef linuxaio_iocbpmax
#undef linuxaio_iocbps
#undef linuxaio_submitcnt
#undef linuxaio_submitmax
#undef linuxaio_submits
#undef loop_count
#undef loop_depth
#undef loop_done