/* lighttpd1.4/src/mod_cgi.c */

#include "first.h"
#include "server.h"
#include "stat_cache.h"
#include "keyvalue.h"
#include "log.h"
#include "connections.h"
#include "joblist.h"
#include "response.h"
#include "http_chunk.h"
#include "plugin.h"
#include <sys/types.h>
#include "sys-mmap.h"
#include "sys-socket.h"
#include <sys/wait.h>
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <fdevent.h>
#include <fcntl.h>
#include <signal.h>
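/* create a pipe with both ends marked close-on-exec.
 * Prefer atomic pipe2(O_CLOEXEC) where available; otherwise fall back to
 * pipe() followed by fcntl(F_SETFD, FD_CLOEXEC) on each end. */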
static int pipe_cloexec(int pipefd[2]) {
#ifdef HAVE_PIPE2
if (0 == pipe2(pipefd, O_CLOEXEC)) return 0;
#endif
return 0 == pipe(pipefd)
#ifdef FD_CLOEXEC
&& 0 == fcntl(pipefd[0], F_SETFD, FD_CLOEXEC)
&& 0 == fcntl(pipefd[1], F_SETFD, FD_CLOEXEC)
#endif
? 0
: -1;
}
typedef struct {
char **ptr;
size_t size;
size_t used;
} char_array;
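/* dynamically-sized list of spawned CGI children: each entry pairs the
 * child pid with an opaque context pointer for the request that spawned it,
 * so the child can be signalled and its entry cleaned up later. */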
typedef struct {
struct { pid_t pid; void *ctx; } *ptr;
size_t used;
size_t size;
} buffer_pid_t;
typedef struct {
array *cgi;
unsigned short execute_x_only;
unsigned short local_redir;
unsigned short xsendfile_allow;
unsigned short upgrade;
array *xsendfile_docroot;
} plugin_config;
typedef struct {
PLUGIN_DATA;
buffer_pid_t cgi_pid;
plugin_config **config_storage;
plugin_config conf;
} plugin_data;
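/* per-request CGI state: the child pid, the fd reading from the child's
 * stdout (fd) and the fd writing to its stdin (fdtocgi), their fdevent
 * indexes, buffered response-header bytes, response-parsing options and a
 * per-request copy of the plugin configuration. */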
typedef struct {
pid_t pid;
int fd;
int fdtocgi;
int fde_ndx; /* index into the fd-event buffer */
int fde_ndx_tocgi; /* index into the fd-event buffer */
connection *remote_conn; /* dumb pointer */
plugin_data *plugin_data; /* dumb pointer */
buffer *response;
buffer *cgi_handler; /* dumb pointer */
http_response_opts opts;
plugin_config conf;
} handler_ctx;
static handler_ctx * cgi_handler_ctx_init(void) {
handler_ctx *hctx = calloc(1, sizeof(*hctx));
force_assert(hctx);
hctx->response = buffer_init();
hctx->fd = -1;
hctx->fdtocgi = -1;
return hctx;
}
static void cgi_handler_ctx_free(handler_ctx *hctx) {
buffer_free(hctx->response);
free(hctx);
}
INIT_FUNC(mod_cgi_init) {
plugin_data *p;
p = calloc(1, sizeof(*p));
force_assert(p);
return p;
}
FREE_FUNC(mod_cgi_free) {
plugin_data *p = p_d;
buffer_pid_t *r = &(p->cgi_pid);
UNUSED(srv);
if (p->config_storage) {
size_t i;
for (i = 0; i < srv->config_context->used; i++) {
plugin_config *s = p->config_storage[i];
if (NULL == s) continue;
array_free(s->cgi);
array_free(s->xsendfile_docroot);
free(s);
}
free(p->config_storage);
}
if (r->ptr) free(r->ptr);
free(p);
return HANDLER_GO_ON;
}
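/* parse the per-context configuration directives handled by this module:
 * cgi.assign, cgi.execute-x-only, cgi.x-sendfile, cgi.x-sendfile-docroot,
 * cgi.local-redir and cgi.upgrade.  Illustrative example (not taken from
 * any particular site config):
 *   cgi.assign             = ( ".pl" => "/usr/bin/perl", ".cgi" => "" )
 *   cgi.x-sendfile         = "enable"
 *   cgi.x-sendfile-docroot = ( "/srv/protected/" )
 */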
SETDEFAULTS_FUNC(mod_fastcgi_set_defaults) {
plugin_data *p = p_d;
size_t i = 0;
config_values_t cv[] = {
{ "cgi.assign", NULL, T_CONFIG_ARRAY, T_CONFIG_SCOPE_CONNECTION }, /* 0 */
{ "cgi.execute-x-only", NULL, T_CONFIG_BOOLEAN, T_CONFIG_SCOPE_CONNECTION }, /* 1 */
{ "cgi.x-sendfile", NULL, T_CONFIG_BOOLEAN, T_CONFIG_SCOPE_CONNECTION }, /* 2 */
{ "cgi.x-sendfile-docroot", NULL, T_CONFIG_ARRAY, T_CONFIG_SCOPE_CONNECTION }, /* 3 */
{ "cgi.local-redir", NULL, T_CONFIG_BOOLEAN, T_CONFIG_SCOPE_CONNECTION }, /* 4 */
{ "cgi.upgrade", NULL, T_CONFIG_BOOLEAN, T_CONFIG_SCOPE_CONNECTION }, /* 5 */
{ NULL, NULL, T_CONFIG_UNSET, T_CONFIG_SCOPE_UNSET}
};
if (!p) return HANDLER_ERROR;
p->config_storage = calloc(1, srv->config_context->used * sizeof(plugin_config *));
force_assert(p->config_storage);
for (i = 0; i < srv->config_context->used; i++) {
data_config const* config = (data_config const*)srv->config_context->data[i];
plugin_config *s;
s = calloc(1, sizeof(plugin_config));
force_assert(s);
s->cgi = array_init();
s->execute_x_only = 0;
s->local_redir = 0;
s->xsendfile_allow= 0;
s->xsendfile_docroot = array_init();
s->upgrade = 0;
cv[0].destination = s->cgi;
cv[1].destination = &(s->execute_x_only);
cv[2].destination = &(s->xsendfile_allow);
cv[3].destination = s->xsendfile_docroot;
cv[4].destination = &(s->local_redir);
cv[5].destination = &(s->upgrade);
p->config_storage[i] = s;
if (0 != config_insert_values_global(srv, config->value, cv, i == 0 ? T_CONFIG_SCOPE_SERVER : T_CONFIG_SCOPE_CONNECTION)) {
return HANDLER_ERROR;
}
if (!array_is_kvstring(s->cgi)) {
log_error_write(srv, __FILE__, __LINE__, "s",
"unexpected value for cgi.assign; expected list of \"ext\" => \"exepath\"");
return HANDLER_ERROR;
}
if (s->xsendfile_docroot->used) {
size_t j;
for (j = 0; j < s->xsendfile_docroot->used; ++j) {
data_string *ds = (data_string *)s->xsendfile_docroot->data[j];
if (ds->type != TYPE_STRING) {
log_error_write(srv, __FILE__, __LINE__, "s",
"unexpected type for key cgi.x-sendfile-docroot; expected: cgi.x-sendfile-docroot = ( \"/allowed/path\", ... )");
return HANDLER_ERROR;
}
if (ds->value->ptr[0] != '/') {
log_error_write(srv, __FILE__, __LINE__, "SBs",
"cgi.x-sendfile-docroot paths must begin with '/'; invalid: \"", ds->value, "\"");
return HANDLER_ERROR;
}
buffer_path_simplify(ds->value, ds->value);
buffer_append_slash(ds->value);
}
}
}
return HANDLER_GO_ON;
}
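/* bookkeeping for spawned CGI children: cgi_pid_add() records a (pid, ctx)
 * pair, cgi_pid_kill() clears the ctx pointer and sends SIGTERM to the
 * child, and cgi_pid_del() drops an entry by swapping in the last one. */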
static void cgi_pid_add(plugin_data *p, pid_t pid, void *ctx) {
buffer_pid_t *r = &(p->cgi_pid);
if (r->size == 0) {
r->size = 16;
r->ptr = malloc(sizeof(*r->ptr) * r->size);
force_assert(r->ptr);
} else if (r->used == r->size) {
r->size += 16;
r->ptr = realloc(r->ptr, sizeof(*r->ptr) * r->size);
force_assert(r->ptr);
}
r->ptr[r->used].pid = pid;
r->ptr[r->used].ctx = ctx;
++r->used;
}
static void cgi_pid_kill(plugin_data *p, pid_t pid) {
buffer_pid_t *r = &(p->cgi_pid);
for (size_t i = 0; i < r->used; ++i) {
if (r->ptr[i].pid == pid) {
r->ptr[i].ctx = NULL;
kill(pid, SIGTERM);
return;
}
}
}
static void cgi_pid_del(plugin_data *p, size_t i) {
buffer_pid_t *r = &(p->cgi_pid);
if (i != r->used - 1) {
r->ptr[i] = r->ptr[r->used - 1];
}
r->used--;
}
static void cgi_connection_close_fdtocgi(server *srv, handler_ctx *hctx) {
/*(closes only hctx->fdtocgi)*/
fdevent_event_del(srv->ev, &(hctx->fde_ndx_tocgi), hctx->fdtocgi);
/*fdevent_unregister(srv->ev, hctx->fdtocgi);*//*(handled below)*/
fdevent_sched_close(srv->ev, hctx->fdtocgi, 0);
hctx->fdtocgi = -1;
}
static void cgi_connection_close(server *srv, handler_ctx *hctx) {
plugin_data *p = hctx->plugin_data;
connection *con = hctx->remote_conn;
/* the connection to the browser went away, but we still have a connection
* to the CGI script
*
* close cgi-connection
*/
if (hctx->fd != -1) {
/* close connection to the cgi-script */
fdevent_event_del(srv->ev, &(hctx->fde_ndx), hctx->fd);
/*fdevent_unregister(srv->ev, hctx->fd);*//*(handled below)*/
fdevent_sched_close(srv->ev, hctx->fd, 0);
}
if (hctx->fdtocgi != -1) {
cgi_connection_close_fdtocgi(srv, hctx); /*(closes only hctx->fdtocgi)*/
}
if (hctx->pid > 0) {
cgi_pid_kill(p, hctx->pid);
}
con->plugin_ctx[p->id] = NULL;
cgi_handler_ctx_free(hctx);
/* finish response (if not already con->file_started, con->file_finished) */
if (con->mode == p->id) {
http_response_backend_done(srv, con);
}
}
static handler_t cgi_connection_close_callback(server *srv, connection *con, void *p_d) {
plugin_data *p = p_d;
handler_ctx *hctx = con->plugin_ctx[p->id];
if (hctx) cgi_connection_close(srv, hctx);
return HANDLER_GO_ON;
}
static int cgi_write_request(server *srv, handler_ctx *hctx, int fd);
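/* fdevent handler for the pipe feeding the CGI's stdin:
 * FDEVENT_OUT  - continue writing buffered request body to the child
 * FDEVENT_HUP  - child closed its stdin; discard any unsent body and close
 *                only the stdin pipe (hctx->fdtocgi)
 * FDEVENT_ERR  - tear down the whole CGI connection */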
static handler_t cgi_handle_fdevent_send (server *srv, void *ctx, int revents) {
handler_ctx *hctx = ctx;
connection *con = hctx->remote_conn;
/*(joblist only actually necessary here in mod_cgi fdevent send if returning HANDLER_ERROR)*/
joblist_append(srv, con);
if (revents & FDEVENT_OUT) {
if (0 != cgi_write_request(srv, hctx, hctx->fdtocgi)) {
cgi_connection_close(srv, hctx);
return HANDLER_ERROR;
}
/* more request body to be sent to CGI */
}
if (revents & FDEVENT_HUP) {
/* skip sending remaining data to CGI */
if (con->request.content_length) {
chunkqueue *cq = con->request_content_queue;
chunkqueue_mark_written(cq, chunkqueue_length(cq));
if (cq->bytes_in != (off_t)con->request.content_length) {
con->keep_alive = 0;
}
}
cgi_connection_close_fdtocgi(srv, hctx); /*(closes only hctx->fdtocgi)*/
} else if (revents & FDEVENT_ERR) {
/* kill all connections to the cgi process */
#if 1
log_error_write(srv, __FILE__, __LINE__, "s", "cgi-FDEVENT_ERR");
#endif
cgi_connection_close(srv, hctx);
return HANDLER_ERROR;
}
return HANDLER_FINISHED;
}
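/* http_response_opts callback, called once the CGI response headers have
 * been fully parsed.  Handles "101 Switching Protocols" when cgi.upgrade is
 * enabled; if no upgrade takes place and the full request body has already
 * been sent, the stdin pipe to the child is closed. */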
static handler_t cgi_response_headers(server *srv, connection *con, struct http_response_opts_t *opts) {
/* response headers just completed */
handler_ctx *hctx = (handler_ctx *)opts->pdata;
if (con->parsed_response & HTTP_UPGRADE) {
if (hctx->conf.upgrade && con->http_status == 101) {
/* 101 Switching Protocols; transition to transparent proxy */
http_response_upgrade_read_body_unknown(srv, con);
}
else {
con->parsed_response &= ~HTTP_UPGRADE;
#if 0
/* preserve prior questionable behavior; likely broken behavior
* anyway if backend thinks connection is being upgraded but client
* does not receive Connection: upgrade */
response_header_overwrite(srv, con, CONST_STR_LEN("Upgrade"),
CONST_STR_LEN(""));
#endif
}
}
if (hctx->conf.upgrade && !(con->parsed_response & HTTP_UPGRADE)) {
chunkqueue *cq = con->request_content_queue;
hctx->conf.upgrade = 0;
if (cq->bytes_out == (off_t)con->request.content_length) {
cgi_connection_close_fdtocgi(srv, hctx); /*(closes hctx->fdtocgi)*/
}
}
return HANDLER_GO_ON;
}
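/* read CGI output via http_response_read() and hand it to the response
 * machinery.  HANDLER_ERROR/HANDLER_FINISHED tear down the CGI connection;
 * HANDLER_COMEBACK (cgi.local-redir) resets the response so the request can
 * be restarted internally; anything else means "keep reading". */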
static handler_t cgi_recv_response(server *srv, handler_ctx *hctx) {
switch (http_response_read(srv, hctx->remote_conn, &hctx->opts,
hctx->response, hctx->fd, &hctx->fde_ndx)) {
default:
return HANDLER_GO_ON;
case HANDLER_ERROR:
http_response_backend_error(srv, hctx->remote_conn);
/* fall through */
case HANDLER_FINISHED:
cgi_connection_close(srv, hctx);
return HANDLER_FINISHED;
case HANDLER_COMEBACK:
/* hctx->conf.local_redir */
connection_response_reset(srv, hctx->remote_conn); /*(includes con->http_status = 0)*/
plugins_call_connection_reset(srv, hctx->remote_conn);
/*cgi_connection_close(srv, hctx);*//*(already cleaned up and hctx is now invalid)*/
return HANDLER_COMEBACK;
}
}
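/* fdevent handler for the pipe reading the CGI's stdout:
 * FDEVENT_IN        - read response data from the child
 * FDEVENT_HUP/RDHUP - drain whatever remains in the kernel pipe buffer, or
 *                     treat an unterminated header block as the body, then
 *                     close the CGI connection
 * FDEVENT_ERR       - tear down the CGI connection */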
static handler_t cgi_handle_fdevent(server *srv, void *ctx, int revents) {
handler_ctx *hctx = ctx;
connection *con = hctx->remote_conn;
joblist_append(srv, con);
if (revents & FDEVENT_IN) {
handler_t rc = cgi_recv_response(srv, hctx);/*(might invalidate hctx)*/
if (rc != HANDLER_GO_ON) return rc; /*(unless HANDLER_GO_ON)*/
}
/* perhaps this issue is already handled */
if (revents & (FDEVENT_HUP|FDEVENT_RDHUP)) {
if (con->file_started) {
/* drain any remaining data from kernel pipe buffers
* even if (con->conf.stream_response_body
* & FDEVENT_STREAM_RESPONSE_BUFMIN)
* since event loop will spin on fd FDEVENT_HUP event
* until unregistered. */
handler_t rc;
const unsigned short flags = con->conf.stream_response_body;
con->conf.stream_response_body &= ~FDEVENT_STREAM_RESPONSE_BUFMIN;
con->conf.stream_response_body |= FDEVENT_STREAM_RESPONSE_POLLRDHUP;
do {
rc = cgi_recv_response(srv,hctx);/*(might invalidate hctx)*/
} while (rc == HANDLER_GO_ON); /*(unless HANDLER_GO_ON)*/
con->conf.stream_response_body = flags;
return rc; /* HANDLER_FINISHED or HANDLER_COMEBACK or HANDLER_ERROR */
} else if (!buffer_string_is_empty(hctx->response)) {
/* unfinished header package which is a body in reality */
con->file_started = 1;
if (0 != http_chunk_append_buffer(srv, con, hctx->response)) {
cgi_connection_close(srv, hctx);
return HANDLER_ERROR;
}
if (0 == con->http_status) con->http_status = 200; /* OK */
}
cgi_connection_close(srv, hctx);
} else if (revents & FDEVENT_ERR) {
/* kill all connections to the cgi process */
cgi_connection_close(srv, hctx);
return HANDLER_ERROR;
}
return HANDLER_FINISHED;
}
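/* callback used while assembling the child's environment: appends one
 * "KEY=value" string (e.g. "GATEWAY_INTERFACE=CGI/1.1") to the char_array
 * that is later handed to the exec'd CGI program as its environment. */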
static int cgi_env_add(void *venv, const char *key, size_t key_len, const char *val, size_t val_len) {
char_array *env = venv;
char *dst;
if (!key || !val) return -1;
dst = malloc(key_len + val_len + 2);
force_assert(dst);
memcpy(dst, key, key_len);
dst[key_len] = '=';
memcpy(dst + key_len + 1, val, val_len);
dst[key_len + 1 + val_len] = '\0';
if (env->size == 0) {
env->size = 16;
env->ptr = malloc(env->size * sizeof(*env->ptr));
force_assert(env->ptr);
} else if (env->size == env->used) {
env->size += 16;
env->ptr = realloc(env->ptr, env->size * sizeof(*env->ptr));
force_assert(env->ptr);
}
env->ptr[env->used++] = dst;
return 0;
}
/*(improved from network_write_mmap.c)*/
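/* mmap() requires a page-aligned file offset, so round the start offset
 * down to the containing page boundary; e.g. with a 4 KiB page size an
 * offset of 10000 aligns down to 8192. */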
static off_t mmap_align_offset(off_t start) {
static off_t pagemask = 0;
if (0 == pagemask) {
long pagesize = sysconf(_SC_PAGESIZE);
if (-1 == pagesize) pagesize = 4096;
pagemask = ~((off_t)pagesize - 1); /* pagesize always power-of-2 */
}
return (start & pagemask);
}
/* returns: 0: continue, -1: fatal error, -2: connection reset */
/* similar to network_write_file_chunk_mmap, but doesn't use send on windows (because we're on pipes),
* also mmaps and sends complete chunk instead of only small parts - the files
* are supposed to be temp files with reasonable chunk sizes.
*
* Also always use mmap; the files are "trusted", as we created them.
*/
static ssize_t cgi_write_file_chunk_mmap(server *srv, connection *con, int fd, chunkqueue *cq) {
chunk* const c = cq->first;
off_t offset, toSend, file_end;
ssize_t r;
size_t mmap_offset, mmap_avail;
char *data = NULL;
force_assert(NULL != c);
force_assert(FILE_CHUNK == c->type);
force_assert(c->offset >= 0 && c->offset <= c->file.length);
offset = c->file.start + c->offset;
toSend = c->file.length - c->offset;
file_end = c->file.start + c->file.length; /* offset to file end in this chunk */
if (0 == toSend) {
chunkqueue_remove_finished_chunks(cq);
return 0;
}
/*(simplified from chunk.c:chunkqueue_open_file_chunk())*/
UNUSED(con);
if (-1 == c->file.fd) {
if (-1 == (c->file.fd = fdevent_open_cloexec(c->file.name->ptr, O_RDONLY, 0))) {
log_error_write(srv, __FILE__, __LINE__, "ssb", "open failed:", strerror(errno), c->file.name);
return -1;
}
}
/* (re)mmap the buffer if range is not covered completely */
if (MAP_FAILED == c->file.mmap.start
|| offset < c->file.mmap.offset
|| file_end > (off_t)(c->file.mmap.offset + c->file.mmap.length)) {
if (MAP_FAILED != c->file.mmap.start) {
munmap(c->file.mmap.start, c->file.mmap.length);
c->file.mmap.start = MAP_FAILED;
}
c->file.mmap.offset = mmap_align_offset(offset);
c->file.mmap.length = file_end - c->file.mmap.offset;
if (MAP_FAILED == (c->file.mmap.start = mmap(NULL, c->file.mmap.length, PROT_READ, MAP_PRIVATE, c->file.fd, c->file.mmap.offset))) {
if (toSend > 65536) toSend = 65536;
data = malloc(toSend);
force_assert(data);
if (-1 == lseek(c->file.fd, offset, SEEK_SET)
|| 0 >= (toSend = read(c->file.fd, data, toSend))) {
if (-1 == toSend) {
log_error_write(srv, __FILE__, __LINE__, "ssbdo", "lseek/read failed:",
strerror(errno), c->file.name, c->file.fd, offset);
} else { /*(0 == toSend)*/
log_error_write(srv, __FILE__, __LINE__, "sbdo", "unexpected EOF (input truncated?):",
c->file.name, c->file.fd, offset);
}
free(data);
return -1;
}
}
}
if (MAP_FAILED != c->file.mmap.start) {
force_assert(offset >= c->file.mmap.offset);
mmap_offset = offset - c->file.mmap.offset;
force_assert(c->file.mmap.length > mmap_offset);
mmap_avail = c->file.mmap.length - mmap_offset;
force_assert(toSend <= (off_t) mmap_avail);
data = c->file.mmap.start + mmap_offset;
}
r = write(fd, data, toSend);
if (MAP_FAILED == c->file.mmap.start) free(data);
if (r < 0) {
switch (errno) {
case EAGAIN:
case EINTR:
return 0;
case EPIPE:
case ECONNRESET:
return -2;
default:
log_error_write(srv, __FILE__, __LINE__, "ssd",
"write failed:", strerror(errno), fd);
return -1;
}
}
if (r >= 0) {
chunkqueue_mark_written(cq, r);
}
return r;
}
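/* push buffered request body (con->request_content_queue) into the pipe
 * connected to the CGI's stdin: FILE_CHUNKs go through
 * cgi_write_file_chunk_mmap(), MEM_CHUNKs through plain write().  Once the
 * whole body has been sent the stdin pipe is closed; otherwise the fd is
 * registered for FDEVENT_OUT and, if the queue has drained low enough,
 * reading more body from the client is re-enabled. */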
static int cgi_write_request(server *srv, handler_ctx *hctx, int fd) {
connection *con = hctx->remote_conn;
chunkqueue *cq = con->request_content_queue;
chunk *c;
/* old comment: windows doesn't support select() on pipes - wouldn't be easy to fix for all platforms.
* solution: if this is still a problem on windows, then substitute
* socketpair() for pipe() and closesocket() for close() on windows.
*/
for (c = cq->first; c; c = cq->first) {
ssize_t r = -1;
switch(c->type) {
case FILE_CHUNK:
r = cgi_write_file_chunk_mmap(srv, con, fd, cq);
break;
case MEM_CHUNK:
if ((r = write(fd, c->mem->ptr + c->offset, buffer_string_length(c->mem) - c->offset)) < 0) {
switch(errno) {
case EAGAIN:
case EINTR:
/* ignore and try again */
r = 0;
break;
case EPIPE:
case ECONNRESET:
/* connection closed */
r = -2;
break;
default:
/* fatal error */
log_error_write(srv, __FILE__, __LINE__, "ss", "write failed due to: ", strerror(errno));
r = -1;
break;
}
} else if (r > 0) {
chunkqueue_mark_written(cq, r);
}
break;
}
if (0 == r) break; /*(might block)*/
switch (r) {
case -1:
/* fatal error */
return -1;
case -2:
/* connection reset */
log_error_write(srv, __FILE__, __LINE__, "s", "failed to send post data to cgi, connection closed by CGI");
/* skip all remaining data */
chunkqueue_mark_written(cq, chunkqueue_length(cq));
break;
default:
break;
}
}
if (cq->bytes_out == (off_t)con->request.content_length && !hctx->conf.upgrade) {
/* sent all request body input */
/* close connection to the cgi-script */
if (-1 == hctx->fdtocgi) { /*(received request body sent in initial send to pipe buffer)*/
--srv->cur_fds;
if (close(fd)) {
log_error_write(srv, __FILE__, __LINE__, "sds", "cgi stdin close failed ", fd, strerror(errno));
}
} else {
cgi_connection_close_fdtocgi(srv, hctx); /*(closes only hctx->fdtocgi)*/
}
} else {
off_t cqlen = cq->bytes_in - cq->bytes_out;
if (cq->bytes_in != con->request.content_length && cqlen < 65536 - 16384) {
/*(con->conf.stream_request_body & FDEVENT_STREAM_REQUEST)*/
if (!(con->conf.stream_request_body & FDEVENT_STREAM_REQUEST_POLLIN)) {
con->conf.stream_request_body |= FDEVENT_STREAM_REQUEST_POLLIN;
con->is_readable = 1; /* trigger optimistic read from client */
}
}
if (-1 == hctx->fdtocgi) { /*(not registered yet)*/
hctx->fdtocgi = fd;
hctx->fde_ndx_tocgi = -1;
fdevent_register(srv->ev, hctx->fdtocgi, cgi_handle_fdevent_send, hctx);
}
if (0 == cqlen) { /*(chunkqueue_is_empty(cq))*/
if ((fdevent_event_get_interest(srv->ev, hctx->fdtocgi) & FDEVENT_OUT)) {
fdevent_event_set(srv->ev, &(hctx->fde_ndx_tocgi), hctx->fdtocgi, 0);
}
} else {
/* more request body remains to be sent to CGI so register for fdevents */