/* lighttpd1.4/src/connections.c — HTTP connection state handling */
#include "first.h"
#include "buffer.h"
#include "server.h"
#include "log.h"
#include "connections.h"
#include "fdevent.h"
#include "configfile.h"
#include "request.h"
#include "response.h"
#include "network.h"
#include "http_chunk.h"
#include "stat_cache.h"
#include "joblist.h"
#include "plugin.h"
#include "inet_ntop_cache.h"
#include <sys/stat.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <assert.h>
#ifdef USE_OPENSSL
# include <openssl/ssl.h>
# include <openssl/err.h>
#endif
#ifdef HAVE_SYS_FILIO_H
# include <sys/filio.h>
#endif
#include "sys-socket.h"
/* minimal view of per-plugin data: only the common PLUGIN_DATA header
 * (e.g. the plugin id) is needed here to index con->plugin_ctx */
typedef struct {
PLUGIN_DATA;
} plugin_data;
/* Return an unused connection slot from the server's pool, growing the
 * pool in batches of 128 pre-initialized connections when exhausted.
 * The returned connection has been reset and has ndx set to its slot. */
static connection *connections_get_new_connection(server *srv) {
	connections *conns = srv->conns;

	if (conns->used == conns->size) {
		/* pool full (or never allocated): extend by 128 slots.
		 * realloc(NULL, ...) behaves like malloc(), covering both cases;
		 * the explicit NULL matches the original's defensive init. */
		size_t n;
		if (0 == conns->size) conns->ptr = NULL;
		conns->size += 128;
		conns->ptr = realloc(conns->ptr, sizeof(*conns->ptr) * conns->size);
		force_assert(NULL != conns->ptr);
		for (n = conns->used; n < conns->size; ++n) {
			conns->ptr[n] = connection_init(srv);
		}
	}

	connection_reset(srv, conns->ptr[conns->used]);

	conns->ptr[conns->used]->ndx = conns->used;
	return conns->ptr[conns->used++];
}
/* Remove a connection from the active list.
 * Keeps the active slots contiguous by moving the last live entry into
 * the vacated slot; the removed connection stays in the pool (swapped
 * to the free region) with ndx = -1.
 * Returns 0 on success, -1 if con is NULL or not in the list. */
static int connection_del(server *srv, connection *con) {
	connections *conns = srv->conns;
	size_t ndx;

	if (NULL == con || -1 == con->ndx) return -1;

	buffer_reset(con->uri.authority);
	buffer_reset(con->uri.path);
	buffer_reset(con->uri.query);
	buffer_reset(con->request.orig_uri);

	ndx = (size_t)con->ndx;
	conns->used--;

	if (ndx != conns->used) {
		/* fill the hole with the last active connection */
		connection *last = conns->ptr[conns->used];
		conns->ptr[conns->used] = con;
		conns->ptr[ndx] = last;
		last->ndx = ndx;
	}

	con->ndx = -1;
	return 0;
}
/* Final teardown of a connection's socket: free SSL state (if any),
 * deregister fd events, close the fd, remove the connection from the
 * active list and park it in CON_STATE_CONNECT. Always returns 0. */
static int connection_close(server *srv, connection *con) {
#ifdef USE_OPENSSL
server_socket *srv_sock = con->srv_socket;
if (srv_sock->is_ssl) {
if (con->ssl) SSL_free(con->ssl);
con->ssl = NULL;
}
#endif
fdevent_event_del(srv->ev, &(con->fde_ndx), con->fd);
fdevent_unregister(srv->ev, con->fd);
#ifdef __WIN32
if (closesocket(con->fd)) {
log_error_write(srv, __FILE__, __LINE__, "sds",
"(warning) close:", con->fd, strerror(errno));
}
#else
if (close(con->fd)) {
log_error_write(srv, __FILE__, __LINE__, "sds",
"(warning) close:", con->fd, strerror(errno));
}
#endif
/* this "else" binds to whichever close call was compiled in above;
 * cur_fds is only decremented when the close actually succeeded */
else {
srv->cur_fds--;
}
if (srv->srvconf.log_state_handling) {
log_error_write(srv, __FILE__, __LINE__, "sd",
"connection closed for fd", con->fd);
}
con->fd = -1;
connection_del(srv, con);
connection_set_state(srv, con, CON_STATE_CONNECT);
return 0;
}
/* Drain pending input from the client socket (lingering close).
 * This runs regardless of con->keep_alive: even non-keepalive sockets
 * may hold unread data, and closing before reading it can make the
 * client miss part of our output. On EOF or a non-recoverable read
 * error the linger timeout is expired so the fd gets closed promptly;
 * on EAGAIN/EWOULDBLOCK we simply return and wait for more data. */
static void connection_read_for_eos(server *srv, connection *con) {
	char drain[4096];
	ssize_t rd;

	while ((rd = read(con->fd, drain, sizeof(drain))) > 0
	       || (rd < 0 && errno == EINTR))
		;

	if (rd < 0) {
		if (errno == EAGAIN) return;
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
		if (errno == EWOULDBLOCK) return;
#endif
	}

	/* 0 == rd (EOF) or fatal error: make the linger timeout fire now */
	con->close_timeout_ts = srv->cur_ts - (HTTP_LINGER_TIMEOUT+1);
}
/* CON_STATE_CLOSE handler: keep draining client input, then close the
 * connection once the linger timeout has elapsed (or was expired early
 * by connection_read_for_eos on EOF/error). */
static void connection_handle_close_state(server *srv, connection *con) {
connection_read_for_eos(srv, con);
if (srv->cur_ts - con->close_timeout_ts > HTTP_LINGER_TIMEOUT) {
connection_close(srv, con);
}
}
/* Begin shutting a connection down: perform the SSL close-notify
 * handshake (if SSL), notify plugins, reset the connection, then
 * shutdown(SHUT_WR) the socket and enter CON_STATE_CLOSE to linger;
 * if shutdown() fails, close immediately. */
static void connection_handle_shutdown(server *srv, connection *con) {
int r;
#ifdef USE_OPENSSL
server_socket *srv_sock = con->srv_socket;
if (srv_sock->is_ssl && SSL_is_init_finished(con->ssl)) {
int ret, ssl_r;
unsigned long err;
ERR_clear_error();
switch ((ret = SSL_shutdown(con->ssl))) {
case 1:
/* ok */
break;
case 0:
/* close-notify sent but peer's not yet received;
 * wait for fd-event
 *
 * FIXME: wait for fdevent and call SSL_shutdown again
 *
 */
ERR_clear_error();
if (-1 != (ret = SSL_shutdown(con->ssl))) break;
/* fall through */
default:
switch ((ssl_r = SSL_get_error(con->ssl, ret))) {
case SSL_ERROR_ZERO_RETURN:
break;
case SSL_ERROR_WANT_WRITE:
/*con->is_writable = -1;*//*(no effect; shutdown() called below)*/
/* fall through */
case SSL_ERROR_WANT_READ:
break;
case SSL_ERROR_SYSCALL:
/* perhaps we have error waiting in our error-queue */
if (0 != (err = ERR_get_error())) {
do {
log_error_write(srv, __FILE__, __LINE__, "sdds", "SSL:",
ssl_r, ret,
ERR_error_string(err, NULL));
} while((err = ERR_get_error()));
} else if (errno != 0) { /* ssl bug (see lighttpd ticket #2213): sometimes errno == 0 */
switch(errno) {
case EPIPE:
case ECONNRESET:
/* expected on abrupt peer close; not worth logging */
break;
default:
log_error_write(srv, __FILE__, __LINE__, "sddds", "SSL (error):",
ssl_r, ret, errno,
strerror(errno));
break;
}
}
break;
default:
/* drain and log the whole OpenSSL error queue */
while((err = ERR_get_error())) {
log_error_write(srv, __FILE__, __LINE__, "sdds", "SSL:",
ssl_r, ret,
ERR_error_string(err, NULL));
}
break;
}
}
ERR_clear_error();
}
#endif
switch(r = plugins_call_handle_connection_close(srv, con)) {
case HANDLER_GO_ON:
case HANDLER_FINISHED:
break;
default:
log_error_write(srv, __FILE__, __LINE__, "sd", "unhandling return value", r);
break;
}
srv->con_closed++;
connection_reset(srv, con);
/* plugins should have cleaned themselves up */
for (size_t i = 0; i < srv->plugins.used; ++i) {
plugin *p = ((plugin **)(srv->plugins.ptr))[i];
plugin_data *pd = p->data;
if (!pd || NULL == con->plugin_ctx[pd->id]) continue;
log_error_write(srv, __FILE__, __LINE__, "sb",
"missing cleanup in", p->name);
con->plugin_ctx[pd->id] = NULL;
}
/* close the connection */
if ((0 == shutdown(con->fd, SHUT_WR))) {
/* half-close succeeded: linger in CON_STATE_CLOSE to drain input */
con->close_timeout_ts = srv->cur_ts;
connection_set_state(srv, con, CON_STATE_CLOSE);
if (srv->srvconf.log_state_handling) {
log_error_write(srv, __FILE__, __LINE__, "sd",
"shutdown for fd", con->fd);
}
} else {
connection_close(srv, con);
}
}
/* CON_STATE_RESPONSE_END handler: log the request, then either recycle
 * the connection for keep-alive or start the shutdown sequence. */
static void connection_handle_response_end_state(server *srv, connection *con) {
	/* write the access log entry; even on error or dropped connection,
	 * still log if an http_status was set */
	if (con->http_status) {
		plugins_call_handle_request_done(srv, con);
	}

	if (CON_STATE_ERROR != con->state) srv->con_written++;

	/* a request body that was not read completely (or an error state)
	 * forbids reusing the connection */
	if (con->state == CON_STATE_ERROR
	    || con->request.content_length != con->request_content_queue->bytes_in) {
		con->keep_alive = 0;
	}

	if (!con->keep_alive) {
		connection_handle_shutdown(srv, con);
	} else {
		connection_reset(srv, con);
		connection_set_state(srv, con, CON_STATE_REQUEST_START);
	}
}
/* Reset the response state so an error document can be generated.
 * Headers produced by modules are discarded, with one exception:
 * mod_auth's WWW-Authenticate header is preserved on a 401, since a
 * 401 response is required to carry it. */
static void connection_handle_errdoc_init(server *srv, connection *con) {
	buffer *saved_auth = NULL;

	if (con->http_status == 401) {
		data_string *ds = (data_string *)array_get_element(con->response.headers, "WWW-Authenticate");
		if (ds) saved_auth = buffer_init_buffer(ds->value);
	}

	con->response.transfer_encoding = 0;
	buffer_reset(con->physical.path);
	array_reset(con->response.headers);
	chunkqueue_reset(con->write_queue);

	if (saved_auth) {
		response_header_insert(srv, con, CONST_STR_LEN("WWW-Authenticate"), CONST_BUF_LEN(saved_auth));
		buffer_free(saved_auth);
	}
}
/* Finalize the response before writing it out:
 * - in DIRECT mode, reject unsupported methods (501) and answer OPTIONS;
 * - default an unset status to 403;
 * - for 4xx/5xx in DIRECT mode, produce an error document (static
 *   errorfile if configured, else a built-in HTML page);
 * - let response_start plugins adjust headers;
 * - set Content-Length (body complete) or Transfer-Encoding: chunked
 *   (HTTP/1.1 streaming), or drop keep-alive for HTTP/1.0 streaming;
 * - strip the body for HEAD requests;
 * - write the response header into the write queue.
 * Returns 0 on success, -1 if a response_start plugin failed. */
static int connection_handle_write_prepare(server *srv, connection *con) {
	if (con->mode == DIRECT) {
		/* static files */
		switch(con->request.http_method) {
		case HTTP_METHOD_GET:
		case HTTP_METHOD_POST:
		case HTTP_METHOD_HEAD:
			break;
		case HTTP_METHOD_OPTIONS:
			/*
			 * 400 is coming from the request-parser BEFORE uri.path is set
			 * 403 is from the response handler when noone else catched it
			 *
			 * */
			if ((!con->http_status || con->http_status == 200) && !buffer_string_is_empty(con->uri.path) &&
			    con->uri.path->ptr[0] != '*') {
				response_header_insert(srv, con, CONST_STR_LEN("Allow"), CONST_STR_LEN("OPTIONS, GET, HEAD, POST"));

				con->response.transfer_encoding &= ~HTTP_TRANSFER_ENCODING_CHUNKED;
				con->parsed_response &= ~HTTP_CONTENT_LENGTH;

				con->http_status = 200;
				con->file_finished = 1;

				chunkqueue_reset(con->write_queue);
			}
			break;
		default:
			if (0 == con->http_status) {
				con->http_status = 501;
			}
			break;
		}
	}

	if (con->http_status == 0) {
		con->http_status = 403;
	}

	switch(con->http_status) {
	case 204: /* class: header only */
	case 205:
	case 304:
		/* disable chunked encoding again as we have no body */
		con->response.transfer_encoding &= ~HTTP_TRANSFER_ENCODING_CHUNKED;
		con->parsed_response &= ~HTTP_CONTENT_LENGTH;
		chunkqueue_reset(con->write_queue);

		con->file_finished = 1;
		break;
	default: /* class: header + body */
		if (con->mode != DIRECT) break;

		/* only custom body for 4xx and 5xx */
		if (con->http_status < 400 || con->http_status >= 600) break;

		con->file_finished = 0;

		connection_handle_errdoc_init(srv, con);

		/* try to send static errorfile */
		if (!buffer_string_is_empty(con->conf.errorfile_prefix)) {
			stat_cache_entry *sce = NULL;

			buffer_copy_buffer(con->physical.path, con->conf.errorfile_prefix);
			buffer_append_int(con->physical.path, con->http_status);
			buffer_append_string_len(con->physical.path, CONST_STR_LEN(".html"));

			if (0 == http_chunk_append_file(srv, con, con->physical.path)) {
				con->file_finished = 1;
				if (HANDLER_ERROR != stat_cache_get_entry(srv, con, con->physical.path, &sce)) {
					response_header_overwrite(srv, con, CONST_STR_LEN("Content-Type"), CONST_BUF_LEN(sce->content_type));
				}
			}
		}

		if (!con->file_finished) {
			/* no errorfile available: build the default error page */
			buffer *b;

			buffer_reset(con->physical.path);

			con->file_finished = 1;
			b = buffer_init();

			/* build default error-page */
			buffer_copy_string_len(b, CONST_STR_LEN(
				"<?xml version=\"1.0\" encoding=\"iso-8859-1\"?>\n"
				"<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n"
				" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n"
				"<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"en\" lang=\"en\">\n"
				" <head>\n"
				" <title>"));
			buffer_append_int(b, con->http_status);
			buffer_append_string_len(b, CONST_STR_LEN(" - "));
			buffer_append_string(b, get_http_status_name(con->http_status));

			buffer_append_string_len(b, CONST_STR_LEN(
				"</title>\n"
				" </head>\n"
				" <body>\n"
				" <h1>"));
			buffer_append_int(b, con->http_status);
			buffer_append_string_len(b, CONST_STR_LEN(" - "));
			buffer_append_string(b, get_http_status_name(con->http_status));

			buffer_append_string_len(b, CONST_STR_LEN("</h1>\n"
				" </body>\n"
				"</html>\n"
				));

			(void)http_chunk_append_buffer(srv, con, b);
			buffer_free(b);

			response_header_overwrite(srv, con, CONST_STR_LEN("Content-Type"), CONST_STR_LEN("text/html"));
		}
		break;
	}

	/* Allow filter plugins to change response headers before they are written. */
	switch(plugins_call_handle_response_start(srv, con)) {
	case HANDLER_GO_ON:
	case HANDLER_FINISHED:
		break;
	default:
		log_error_write(srv, __FILE__, __LINE__, "s", "response_start plugin failed");
		return -1;
	}

	if (con->file_finished) {
		/* we have all the content and chunked encoding is not used, set a content-length */

		if ((!(con->parsed_response & HTTP_CONTENT_LENGTH)) &&
		    (con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) == 0) {
			off_t qlen = chunkqueue_length(con->write_queue);

			/**
			 * The Content-Length header only can be sent if we have content:
			 * - HEAD doesn't have a content-body (but have a content-length)
			 * - 1xx, 204 and 304 don't have a content-body (RFC 2616 Section 4.3)
			 *
			 * Otherwise generate a Content-Length header as chunked encoding is not
			 * available
			 */
			if ((con->http_status >= 100 && con->http_status < 200) ||
			    con->http_status == 204 ||
			    con->http_status == 304) {
				data_string *ds;
				/* no Content-Body, no Content-Length */
				if (NULL != (ds = (data_string*) array_get_element(con->response.headers, "Content-Length"))) {
					buffer_reset(ds->value); /* Headers with empty values are ignored for output */
				}
			} else if (qlen > 0 || con->request.http_method != HTTP_METHOD_HEAD) {
				/* qlen = 0 is important for Redirects (301, ...) as they MAY have
				 * a content. Browsers are waiting for a Content otherwise
				 */
				buffer_copy_int(srv->tmp_buf, qlen);

				response_header_overwrite(srv, con, CONST_STR_LEN("Content-Length"), CONST_BUF_LEN(srv->tmp_buf));
			}
		}
	} else {
		/**
		 * the file isn't finished yet, but we have all headers
		 *
		 * to get keep-alive we either need:
		 * - Content-Length: ... (HTTP/1.0 and HTTP/1.1) or
		 * - Transfer-Encoding: chunked (HTTP/1.1)
		 */

		if (((con->parsed_response & HTTP_CONTENT_LENGTH) == 0) &&
		    ((con->response.transfer_encoding & HTTP_TRANSFER_ENCODING_CHUNKED) == 0)) {
			if (con->request.http_version == HTTP_VERSION_1_1) {
				off_t qlen = chunkqueue_length(con->write_queue);
				con->response.transfer_encoding = HTTP_TRANSFER_ENCODING_CHUNKED;
				if (qlen) {
					/* create initial Transfer-Encoding: chunked segment */
					buffer *b = srv->tmp_chunk_len;
					buffer_string_set_length(b, 0);
					buffer_append_uint_hex(b, (uintmax_t)qlen);
					buffer_append_string_len(b, CONST_STR_LEN("\r\n"));
					chunkqueue_prepend_buffer(con->write_queue, b);
					chunkqueue_append_mem(con->write_queue, CONST_STR_LEN("\r\n"));
				}
			} else {
				con->keep_alive = 0;
			}
		}

		/**
		 * if the backend sent a Connection: close, follow the wish
		 *
		 * NOTE: if the backend sent Connection: Keep-Alive, but no Content-Length, we
		 * will close the connection. That's fine. We can always decide the close
		 * the connection
		 *
		 * FIXME: to be nice we should remove the Connection: ...
		 */
		if (con->parsed_response & HTTP_CONNECTION) {
			/* a subrequest disable keep-alive although the client wanted it */
			if (con->keep_alive && !con->response.keep_alive) {
				con->keep_alive = 0;
			}
		}
	}

	if (con->request.http_method == HTTP_METHOD_HEAD) {
		/**
		 * a HEAD request has the same as a GET
		 * without the content
		 */
		con->file_finished = 1;

		chunkqueue_reset(con->write_queue);
		con->response.transfer_encoding &= ~HTTP_TRANSFER_ENCODING_CHUNKED;
	}

	http_response_write_header(srv, con);

	return 0;
}
/* Flush the write queue to the network and update connection state
 * according to the result. Always returns 0; failures are signaled
 * through CON_STATE_ERROR. */
static int connection_handle_write(server *srv, connection *con) {
	int rc = network_write_chunkqueue(srv, con, con->write_queue, MAX_WRITE_LIMIT);

	switch (rc) {
	case 0:
		/* queue flushed; finish the response if all content is queued */
		con->write_request_ts = srv->cur_ts;
		if (con->file_finished) {
			connection_set_state(srv, con, CON_STATE_RESPONSE_END);
		}
		break;
	case 1:
		/* partial write: wait until the socket is writable again */
		con->write_request_ts = srv->cur_ts;
		con->is_writable = 0;
		break;
	case -2:
		/* remote close */
		connection_set_state(srv, con, CON_STATE_ERROR);
		break;
	case -1:
		/* error on our side */
		log_error_write(srv, __FILE__, __LINE__, "sd",
				"connection closed: write failed on fd", con->fd);
		connection_set_state(srv, con, CON_STATE_ERROR);
		break;
	}

	return 0;
}
/* Allocate and initialize a new connection object.
 * All buffers, chunkqueues and header arrays are created empty; the
 * plugin-context and condition-cache arrays are sized from srv (plugin
 * count and config-context count). Aborts via force_assert on OOM.
 * The result is owned by the connection pool (freed in connections_free). */
connection *connection_init(server *srv) {
	connection *con;

	/* fix: the original carried a stale UNUSED(srv) marker, but srv IS
	 * used below (plugin_ctx/cond_cache sizing, config_setup_connection) */
	con = calloc(1, sizeof(*con));
	force_assert(NULL != con);

	/* explicit field init (calloc already zeroed; -1 marks "no index") */
	con->fd = 0;
	con->ndx = -1;
	con->fde_ndx = -1;
	con->bytes_written = 0;
	con->bytes_read = 0;
	con->bytes_header = 0;
	con->loops_per_request = 0;

#define CLEAN(x) \
	con->x = buffer_init();

	CLEAN(request.uri);
	CLEAN(request.request_line);
	CLEAN(request.request);
	CLEAN(request.pathinfo);
	CLEAN(request.orig_uri);

	CLEAN(uri.scheme);
	CLEAN(uri.authority);
	CLEAN(uri.path);
	CLEAN(uri.path_raw);
	CLEAN(uri.query);

	CLEAN(physical.doc_root);
	CLEAN(physical.path);
	CLEAN(physical.basedir);
	CLEAN(physical.rel_path);
	CLEAN(physical.etag);
	CLEAN(parse_request);

	CLEAN(server_name);
	CLEAN(dst_addr_buf);
#if defined USE_OPENSSL && ! defined OPENSSL_NO_TLSEXT
	CLEAN(tlsext_server_name);
#endif
#undef CLEAN

	con->write_queue = chunkqueue_init();
	con->read_queue = chunkqueue_init();
	con->request_content_queue = chunkqueue_init();

	con->request.headers = array_init();
	con->response.headers = array_init();
	con->environment = array_init();

	/* init plugin specific connection structures */
	con->plugin_ctx = calloc(1, (srv->plugins.used + 1) * sizeof(void *));
	force_assert(NULL != con->plugin_ctx);

	con->cond_cache = calloc(srv->config_context->used, sizeof(cond_cache_t));
	force_assert(NULL != con->cond_cache);
	config_setup_connection(srv, con);

	return con;
}
/* Free every connection in the pool (including never-used slots) and
 * the pool's pointer array itself. Counterpart to connection_init(). */
void connections_free(server *srv) {
	connections *conns = srv->conns;

	for (size_t i = 0; i < conns->size; ++i) {
		connection *con = conns->ptr[i];

		connection_reset(srv, con);

		chunkqueue_free(con->write_queue);
		chunkqueue_free(con->read_queue);
		chunkqueue_free(con->request_content_queue);

		array_free(con->request.headers);
		array_free(con->response.headers);
		array_free(con->environment);

		/* release every buffer created by connection_init() */
		buffer_free(con->request.uri);
		buffer_free(con->request.request_line);
		buffer_free(con->request.request);
		buffer_free(con->request.pathinfo);
		buffer_free(con->request.orig_uri);

		buffer_free(con->uri.scheme);
		buffer_free(con->uri.authority);
		buffer_free(con->uri.path);
		buffer_free(con->uri.path_raw);
		buffer_free(con->uri.query);

		buffer_free(con->physical.doc_root);
		buffer_free(con->physical.path);
		buffer_free(con->physical.basedir);
		buffer_free(con->physical.etag);
		buffer_free(con->physical.rel_path);

		buffer_free(con->parse_request);
		buffer_free(con->server_name);
		buffer_free(con->dst_addr_buf);
#if defined USE_OPENSSL && ! defined OPENSSL_NO_TLSEXT
		buffer_free(con->tlsext_server_name);
#endif

		free(con->plugin_ctx);
		free(con->cond_cache);

		free(con);
	}

	free(conns->ptr);
}