lighttpd1.4/src/connections.c


#include "first.h"
#include "base.h"
#include "buffer.h"
#include "burl.h" /* HTTP_PARSEOPT_HEADER_STRICT */
#include "log.h"
#include "connections.h"
#include "fdevent.h"
#include "http_header.h"
#include "configfile.h"
#include "request.h"
#include "response.h"
#include "network.h"
#include "http_chunk.h"
#include "stat_cache.h"
#include "joblist.h"
#include "plugin.h"
#include "inet_ntop_cache.h"
#include <sys/stat.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#ifdef HAVE_SYS_FILIO_H
# include <sys/filio.h>
#endif
#include "sys-socket.h"
#define HTTP_LINGER_TIMEOUT 5
#define connection_set_state(con, n) ((con)->state = (n))
typedef struct {
PLUGIN_DATA;
} plugin_data;
__attribute_cold__
static connection *connection_init(server *srv);
static int connection_reset(server *srv, connection *con);
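/* return an unused connection from the pool, growing srv->conns (and
* initializing the newly allocated slots) when every entry is in use */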
static connection *connections_get_new_connection(server *srv) {
connections *conns = srv->conns;
size_t i;
if (conns->size == conns->used) {
conns->size += srv->max_conns >= 128 ? 128 : srv->max_conns > 16 ? 16 : srv->max_conns;
conns->ptr = realloc(conns->ptr, sizeof(*conns->ptr) * conns->size);
force_assert(NULL != conns->ptr);
for (i = conns->used; i < conns->size; i++) {
conns->ptr[i] = connection_init(srv);
connection_reset(srv, conns->ptr[i]);
}
}
conns->ptr[conns->used]->ndx = conns->used;
return conns->ptr[conns->used++];
}
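/* remove a connection from the list of active connections by swapping it
* with the last used entry, keeping the active part of the array compact */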
static int connection_del(server *srv, connection *con) {
size_t i;
connections *conns = srv->conns;
connection *temp;
if (con == NULL) return -1;
if (-1 == con->ndx) return -1;
buffer_clear(con->uri.authority);
buffer_reset(con->uri.path);
buffer_reset(con->uri.query);
buffer_reset(con->request.orig_uri);
i = con->ndx;
/* not last element */
if (i != conns->used - 1) {
temp = conns->ptr[i];
conns->ptr[i] = conns->ptr[conns->used - 1];
conns->ptr[conns->used - 1] = temp;
conns->ptr[i]->ndx = i;
conns->ptr[conns->used - 1]->ndx = -1;
}
conns->used--;
con->ndx = -1;
#if 0
fprintf(stderr, "%s.%d: del: (%d)", __FILE__, __LINE__, conns->used);
for (i = 0; i < conns->used; i++) {
fprintf(stderr, "%d ", conns->ptr[i]->fd);
}
fprintf(stderr, "\n");
#endif
return 0;
}
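/* tear down the socket: drop the fdevent registration, close the fd, and
* warn about any plugin context that was not cleaned up by its plugin */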
static int connection_close(server *srv, connection *con) {
if (con->fd < 0) con->fd = -con->fd;
plugins_call_handle_connection_close(srv, con);
con->request_count = 0;
chunkqueue_reset(con->read_queue);
fdevent_fdnode_event_del(srv->ev, con->fdn);
fdevent_unregister(srv->ev, con->fd);
con->fdn = NULL;
#ifdef __WIN32
if (closesocket(con->fd)) {
log_error_write(srv, __FILE__, __LINE__, "sds",
"(warning) close:", con->fd, strerror(errno));
}
#else
if (close(con->fd)) {
log_error_write(srv, __FILE__, __LINE__, "sds",
"(warning) close:", con->fd, strerror(errno));
}
#endif
else {
srv->cur_fds--;
}
if (srv->srvconf.log_state_handling) {
log_error_write(srv, __FILE__, __LINE__, "sd",
"connection closed for fd", con->fd);
}
con->fd = -1;
con->is_ssl_sock = 0;
/* plugins should have cleaned themselves up */
for (size_t i = 0; i < srv->plugins.used; ++i) {
plugin *p = ((plugin **)(srv->plugins.ptr))[i];
plugin_data *pd = p->data;
if (!pd || NULL == con->plugin_ctx[pd->id]) continue;
log_error_write(srv, __FILE__, __LINE__, "sb",
"missing cleanup in", p->name);
con->plugin_ctx[pd->id] = NULL;
}
connection_del(srv, con);
connection_set_state(con, CON_STATE_CONNECT);
return 0;
}
static void connection_read_for_eos_plain(server *srv, connection *con) {
/* we have to do the linger_on_close stuff regardless
* of con->keep_alive; even non-keepalive sockets may
* still have unread data, and closing before reading
* it will make the client not see all our output.
*/
ssize_t len;
const int type = con->dst_addr.plain.sa_family;
char buf[16384];
do {
len = fdevent_socket_read_discard(con->fd, buf, sizeof(buf),
type, SOCK_STREAM);
} while (len > 0 || (len < 0 && errno == EINTR));
if (len < 0 && errno == EAGAIN) return;
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
if (len < 0 && errno == EWOULDBLOCK) return;
#endif
/* 0 == len || (len < 0 && (errno is a non-recoverable error)) */
con->close_timeout_ts = srv->cur_ts - (HTTP_LINGER_TIMEOUT+1);
}
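/* for TLS sockets, let the network_read callback consume (and discard)
* pending data; a read error ends the linger wait immediately */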
static void connection_read_for_eos_ssl(server *srv, connection *con) {
if (con->network_read(srv, con, con->read_queue, MAX_READ_LIMIT) < 0)
con->close_timeout_ts = srv->cur_ts - (HTTP_LINGER_TIMEOUT+1);
chunkqueue_reset(con->read_queue);
}
static void connection_read_for_eos(server *srv, connection *con) {
!con->is_ssl_sock
? connection_read_for_eos_plain(srv, con)
: connection_read_for_eos_ssl(srv, con);
}
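/* CON_STATE_CLOSE: keep draining the socket until the linger timeout
* (HTTP_LINGER_TIMEOUT) has expired, then close the connection */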
static void connection_handle_close_state(server *srv, connection *con) {
connection_read_for_eos(srv, con);
if (srv->cur_ts - con->close_timeout_ts > HTTP_LINGER_TIMEOUT) {
connection_close(srv, con);
}
}
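/* shut down the write side of plain sockets (TLS sockets skip shutdown())
* and enter CON_STATE_CLOSE for the linger phase; close outright on failure */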
static void connection_handle_shutdown(server *srv, connection *con) {
plugins_call_handle_connection_shut_wr(srv, con);
srv->con_closed++;
connection_reset(srv, con);
/* close the connection */
if (con->fd >= 0
&& (con->is_ssl_sock || 0 == shutdown(con->fd, SHUT_WR))) {
con->close_timeout_ts = srv->cur_ts;
connection_set_state(con, CON_STATE_CLOSE);
if (srv->srvconf.log_state_handling) {
log_error_write(srv, __FILE__, __LINE__, "sd",
"shutdown for fd", con->fd);
}
} else {
connection_close(srv, con);
}
}
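/* end of a response: log the completed request, then either reset the
* connection for keep-alive or begin shutting it down */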
static void connection_handle_response_end_state(server *srv, connection *con) {
/* log the request */
/* (even if error, connection dropped, still write to access log if http_status) */
if (con->http_status) {
plugins_call_handle_request_done(srv, con);
}
if (con->state != CON_STATE_ERROR) srv->con_written++;
if (con->request.content_length != con->request_content_queue->bytes_in
|| con->state == CON_STATE_ERROR) {
/* request body is present and has not been read completely */
con->keep_alive = 0;
}
if (con->keep_alive) {
connection_reset(srv, con);
#if 0
con->request_start = srv->cur_ts;
con->read_idle_ts = srv->cur_ts;
#endif
connection_set_state(con, CON_STATE_REQUEST_START);
} else {
connection_handle_shutdown(srv, con);
}
}
static void connection_handle_errdoc_init(connection *con) {
/* modules that produce headers required with an error response should
* typically also produce an error document. Make an exception for the
* mod_auth WWW-Authenticate response header. */
buffer *www_auth = NULL;
if (401 == con->http_status) {
buffer *vb = http_header_response_get(con, HTTP_HEADER_OTHER, CONST_STR_LEN("WWW-Authenticate"));
if (NULL != vb) www_auth = buffer_init_buffer(vb);
}
buffer_reset(con->physical.path);
con->response.htags = 0;
array_reset_data_strings(con->response.headers);
http_response_body_clear(con, 0);
if (NULL != www_auth) {
http_header_response_set(con, HTTP_HEADER_OTHER, CONST_STR_LEN("WWW-Authenticate"), CONST_BUF_LEN(www_auth));
buffer_free(www_auth);
}
}
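/* finalize the response before the header is written: fill in a default
* status and error document where needed, then choose between
* Content-Length and Transfer-Encoding: chunked (or neither) */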
static int connection_handle_write_prepare(server *srv, connection *con) {
if (con->mode == DIRECT) {
/* static files */
switch(con->request.http_method) {
case HTTP_METHOD_GET:
case HTTP_METHOD_POST:
case HTTP_METHOD_HEAD:
break;
case HTTP_METHOD_OPTIONS:
/*
* 400 comes from the request parser BEFORE uri.path is set
* 403 comes from the response handler when no one else caught it
*/
if ((!con->http_status || con->http_status == 200) && !buffer_string_is_empty(con->uri.path) &&
con->uri.path->ptr[0] != '*') {
http_response_body_clear(con, 0);
http_header_response_append(con, HTTP_HEADER_OTHER, CONST_STR_LEN("Allow"), CONST_STR_LEN("OPTIONS, GET, HEAD, POST"));
con->http_status = 200;
con->file_finished = 1;
}
break;
default:
if (0 == con->http_status) {
con->http_status = 501;
}
break;
}
}
if (con->http_status == 0) {
con->http_status = 403;
}
switch(con->http_status) {
case 204: /* class: header only */
case 205:
case 304:
/* disable chunked encoding again as we have no body */
http_response_body_clear(con, 1);
con->file_finished = 1;
break;
default: /* class: header + body */
/* only custom body for 4xx and 5xx */
if (con->http_status < 400 || con->http_status >= 600) break;
if (con->mode != DIRECT && (!con->conf.error_intercept || con->error_handler_saved_status)) break;
if (con->mode == DIRECT && con->error_handler_saved_status >= 65535) break;
con->file_finished = 0;
connection_handle_errdoc_init(con);
/* try to send static errorfile */
if (!buffer_string_is_empty(con->conf.errorfile_prefix)) {
stat_cache_entry *sce = NULL;
buffer_copy_buffer(con->physical.path, con->conf.errorfile_prefix);
buffer_append_int(con->physical.path, con->http_status);
buffer_append_string_len(con->physical.path, CONST_STR_LEN(".html"));
if (0 == http_chunk_append_file(srv, con, con->physical.path)) {
con->file_finished = 1;
if (HANDLER_ERROR != stat_cache_get_entry(srv, con, con->physical.path, &sce)) {
stat_cache_content_type_get(srv, con, con->physical.path, sce);
http_header_response_set(con, HTTP_HEADER_CONTENT_TYPE, CONST_STR_LEN("Content-Type"), CONST_BUF_LEN(sce->content_type));
}
}
}
if (!con->file_finished) {
buffer *b = srv->tmp_buf;
buffer_reset(con->physical.path);
con->file_finished = 1;
/* build default error-page */
buffer_copy_string_len(b, CONST_STR_LEN(
"<?xml version=\"1.0\" encoding=\"iso-8859-1\"?>\n"
"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n"
" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n"
"<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\n"
" <head>\n"
" <title>"));
http_status_append(b, con->http_status);
buffer_append_string_len(b, CONST_STR_LEN(
"</title>\n"
" </head>\n"
" <body>\n"
" <h1>"));
http_status_append(b, con->http_status);
buffer_append_string_len(b, CONST_STR_LEN("</h1>\n"
" </body>\n"
"</html>\n"
));
(void)http_chunk_append_mem(srv, con, CONST_BUF_LEN(b));
http_header_response_set(con, HTTP_HEADER_CONTENT_TYPE, CONST_STR_LEN("Content-Type"), CONST_STR_LEN("text/html"));
}
break;
}
/* Allow filter plugins to change response headers before they are written. */
switch(plugins_call_handle_response_start(srv, con)) {
case HANDLER_GO_ON:
case HANDLER_FINISHED:
break;
default:
log_error_write(srv, __FILE__, __LINE__, "s", "response_start plugin failed");
return -1;
}
if (con->file_finished) {
/* we have all the content and chunked encoding is not used, set a content-length */
if (!(con->response.htags & (HTTP_HEADER_CONTENT_LENGTH|HTTP_HEADER_TRANSFER_ENCODING))) {
off_t qlen = chunkqueue_length(con->write_queue);
/**
* The Content-Length header can only be sent if we have content:
* - HEAD doesn't have a content-body (but may have a Content-Length)
* - 1xx, 204 and 304 don't have a content-body (RFC 2616 Section 4.3)
*
* Otherwise generate a Content-Length header, as chunked encoding is not
* available
*/
if ((con->http_status >= 100 && con->http_status < 200) ||
con->http_status == 204 ||
con->http_status == 304) {
/* no Content-Body, no Content-Length */
http_header_response_unset(con, HTTP_HEADER_CONTENT_LENGTH, CONST_STR_LEN("Content-Length"));
} else if (qlen > 0 || con->request.http_method != HTTP_METHOD_HEAD) {
/* qlen = 0 is important for redirects (301, ...) as they MAY have
* content. Browsers will otherwise wait for content.
*/
buffer_copy_int(srv->tmp_buf, qlen);
http_header_response_set(con, HTTP_HEADER_CONTENT_LENGTH, CONST_STR_LEN("Content-Length"), CONST_BUF_LEN(srv->tmp_buf));
}
}
} else {
/**
* the file isn't finished yet, but we have all headers
*
* to get keep-alive we either need:
* - Content-Length: ... (HTTP/1.0 and HTTP/1.1) or
* - Transfer-Encoding: chunked (HTTP/1.1)
* - Upgrade: ... (lighttpd then acts as transparent proxy)
*/
if (!(con->response.htags & (HTTP_HEADER_CONTENT_LENGTH|HTTP_HEADER_TRANSFER_ENCODING|HTTP_HEADER_UPGRADE))) {
if (con->request.http_method == HTTP_METHOD_CONNECT
&& con->http_status == 200) {
/*(no transfer-encoding if successful CONNECT)*/
} else if (con->request.http_version == HTTP_VERSION_1_1) {
off_t qlen = chunkqueue_length(con->write_queue);
con->response.send_chunked = 1;
if (qlen) {
/* create initial Transfer-Encoding: chunked segment */
buffer * const b = chunkqueue_prepend_buffer_open(con->write_queue);
buffer_append_uint_hex(b, (uintmax_t)qlen);
buffer_append_string_len(b, CONST_STR_LEN("\r\n"));
chunkqueue_prepend_buffer_commit(con->write_queue);
chunkqueue_append_mem(con->write_queue, CONST_STR_LEN("\r\n"));
}
http_header_response_append(con, HTTP_HEADER_TRANSFER_ENCODING, CONST_STR_LEN("Transfer-Encoding"), CONST_STR_LEN("chunked"));
} else {
con->keep_alive = 0;
}
}
}
if (con->request.http_method == HTTP_METHOD_HEAD) {
/**
* a HEAD request gets the same response headers as a GET,
* but without the content
*/
http_response_body_clear(con, 1);
con->file_finished = 1;
}
http_response_write_header(srv, con);
return 0;
}
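/* write as much of con->write_queue as currently allowed and adjust the
* connection state according to the result of connection_write_chunkqueue() */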
static void connection_handle_write(server *srv, connection *con) {
switch(connection_write_chunkqueue(srv, con, con->write_queue, MAX_WRITE_LIMIT)) {
case 0:
con->write_request_ts = srv->cur_ts;
if (con->file_finished) {
connection_set_state(con, CON_STATE_RESPONSE_END);
}
break;
case -1: /* error on our side */
log_error_write(srv, __FILE__, __LINE__, "sd",
"connection closed: write failed on fd", con->fd);
connection_set_state(con, CON_STATE_ERROR);
break;
case -2: /* remote close */
connection_set_state(con, CON_STATE_ERROR);
break;
case 1:
con->write_request_ts = srv->cur_ts;
con->is_writable = 0;
/* not finished yet -> WRITE */
break;
}
}
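/* CON_STATE_WRITE: alternate between writing buffered response data and
* asking the subrequest handler for more, until blocked or finished */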
static void connection_handle_write_state(server *srv, connection *con) {
do {
/* only try to write if we have something in the queue */
if (!chunkqueue_is_empty(con->write_queue)) {
if (con->is_writable) {
connection_handle_write(srv, con);
if (con->state != CON_STATE_WRITE) break;
}
} else if (con->file_finished) {
connection_set_state(con, CON_STATE_RESPONSE_END);
break;
}
if (con->mode != DIRECT && !con->file_finished) {
int r = plugins_call_handle_subrequest(srv, con);
switch(r) {
case HANDLER_WAIT_FOR_EVENT:
case HANDLER_FINISHED:
case HANDLER_GO_ON:
break;
case HANDLER_WAIT_FOR_FD:
srv->want_fds++;
fdwaitqueue_append(srv, con);
break;
case HANDLER_COMEBACK:
default:
log_error_write(srv, __FILE__, __LINE__, "sdd",
"unexpected subrequest handler ret-value:",
con->fd, r);
/* fall through */
case HANDLER_ERROR:
connection_set_state(con, CON_STATE_ERROR);
break;
}
}
} while (con->state == CON_STATE_WRITE
&& (!chunkqueue_is_empty(con->write_queue)
? con->is_writable
: con->file_finished));
}
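/* allocate and initialize a connection object together with its buffers,
* chunkqueues, header arrays and per-plugin/conditional caches */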
__attribute_cold__
static connection *connection_init(server *srv) {
connection *con;
UNUSED(srv);
con = calloc(1, sizeof(*con));
force_assert(NULL != con);
con->fd = 0;
con->ndx = -1;
con->bytes_written = 0;
con->bytes_read = 0;
con->bytes_header = 0;
con->loops_per_request = 0;
#define CLEAN(x) \
con->x = buffer_init();
CLEAN(request.uri);
CLEAN(request.request);
CLEAN(request.pathinfo);
CLEAN(request.orig_uri);
CLEAN(uri.scheme);
CLEAN(uri.authority);
CLEAN(uri.path);
CLEAN(uri.path_raw);
CLEAN(uri.query);
CLEAN(physical.doc_root);
CLEAN(physical.path);
CLEAN(physical.basedir);
CLEAN(physical.rel_path);
CLEAN(physical.etag);
CLEAN(server_name);
CLEAN(proto);
CLEAN(dst_addr_buf);
#undef CLEAN
con->write_queue = chunkqueue_init();
con->read_queue = chunkqueue_init();
con->request_content_queue = chunkqueue_init();
con->request.headers = array_init();
con->response.headers = array_init();
con->environment = array_init();
/* init plugin specific connection structures */
con->plugin_ctx = calloc(1, (srv->plugins.used + 1) * sizeof(void *));
force_assert(NULL != con->plugin_ctx);
con->cond_cache = calloc(srv->config_context->used, sizeof(cond_cache_t));
force_assert(NULL != con->cond_cache);
config_setup_connection(srv, con);
return con;
}
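/* free all pooled connections and their associated buffers and queues */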
void connections_free(server *srv) {
connections *conns = srv->conns;
size_t i;
if (NULL == conns) return;
for (i = 0; i < conns->size; i++) {
connection *con = conns->ptr[i];
connection_reset(srv, con);
chunkqueue_free(con->write_queue);
chunkqueue_free(con->read_queue);
chunkqueue_free(con->request_content_queue);
array_free(con->request.headers);
array_free(con->response.headers);
array_free(con->environment);
#define CLEAN(x) \
buffer_free(con->x);
CLEAN(request.uri);
CLEAN(request.request);
CLEAN(request.pathinfo);
CLEAN(request.orig_uri);
CLEAN(uri.scheme);
CLEAN(uri.authority);
CLEAN(uri.path);
CLEAN(uri.path_raw);
CLEAN(uri.query);
CLEAN(physical.doc_root);
CLEAN(physical.path);
CLEAN(physical.basedir);
CLEAN(physical.etag);
CLEAN(physical.rel_path);
CLEAN(server_name);