[multiple] internal control for backend read bytes
Separate internal control for backend max_per_read. When not streaming, large reads will be flushed to temp files on disk. When streaming, use a smaller buffer to help reduce memory usage. When not streaming, attempt to read and empty kernel socket bufs (e.g. MAX_READ_LIMIT 256k). When writing to sockets (or pipes), attempt to fill kernel socket bufs (e.g. MAX_WRITE_LIMIT 256k). (branch: personal/stbuehler/tests-path)
parent
d59d5e59b9
commit
f19f71625c
|
@ -1896,8 +1896,14 @@ connection_handle_read_post_state (request_st * const r)
|
|||
}
|
||||
else if (con->is_readable > 0) {
|
||||
con->read_idle_ts = log_monotonic_secs;
|
||||
|
||||
switch(con->network_read(con, cq, MAX_READ_LIMIT)) {
|
||||
const off_t max_per_read =
|
||||
!(r->conf.stream_request_body /*(if not streaming request body)*/
|
||||
& (FDEVENT_STREAM_REQUEST|FDEVENT_STREAM_REQUEST_BUFMIN))
|
||||
? MAX_READ_LIMIT
|
||||
: (r->conf.stream_request_body & FDEVENT_STREAM_REQUEST_BUFMIN)
|
||||
? 16384 /* FDEVENT_STREAM_REQUEST_BUFMIN */
|
||||
: 65536; /* FDEVENT_STREAM_REQUEST */
|
||||
switch(con->network_read(con, cq, max_per_read)) {
|
||||
case -1:
|
||||
connection_set_state_error(r, CON_STATE_ERROR);
|
||||
return HANDLER_ERROR;
|
||||
|
|
|
@ -2629,6 +2629,13 @@ handler_t gw_check_extension(request_st * const r, gw_plugin_data * const p, int
|
|||
hctx->conf.proto = p->conf.proto;
|
||||
hctx->conf.debug = p->conf.debug;
|
||||
|
||||
hctx->opts.max_per_read =
|
||||
!(r->conf.stream_response_body /*(if not streaming response body)*/
|
||||
& (FDEVENT_STREAM_RESPONSE|FDEVENT_STREAM_RESPONSE_BUFMIN))
|
||||
? 262144
|
||||
: (r->conf.stream_response_body & FDEVENT_STREAM_RESPONSE_BUFMIN)
|
||||
? 16384 /* FDEVENT_STREAM_RESPONSE_BUFMIN */
|
||||
: 65536; /* FDEVENT_STREAM_RESPONSE */
|
||||
hctx->opts.fdfmt = S_IFSOCK;
|
||||
hctx->opts.authorizer = (gw_mode == GW_AUTHORIZER);
|
||||
hctx->opts.local_redir = 0;
|
||||
|
|
|
@ -1153,6 +1153,7 @@ handler_t http_response_read(request_st * const r, http_response_opts * const op
|
|||
const int fd = fdn->fd;
|
||||
ssize_t n;
|
||||
size_t avail;
|
||||
/*size_t total = 0;*/
|
||||
do {
|
||||
unsigned int toread = 0;
|
||||
avail = buffer_string_space(b);
|
||||
|
@ -1162,8 +1163,8 @@ handler_t http_response_read(request_st * const r, http_response_opts * const op
|
|||
uint32_t blen = buffer_clen(b);
|
||||
if (toread + blen < 4096)
|
||||
toread = 4095 - blen;
|
||||
else if (toread > MAX_READ_LIMIT)
|
||||
toread = MAX_READ_LIMIT;
|
||||
else if (toread > opts->max_per_read)
|
||||
toread = opts->max_per_read;
|
||||
}
|
||||
else if (0 == toread) {
|
||||
#if 0
|
||||
|
@ -1212,7 +1213,9 @@ handler_t http_response_read(request_st * const r, http_response_opts * const op
|
|||
|
||||
if (avail < toread) {
|
||||
/*(add avail+toread to reduce allocations when ioctl EOPNOTSUPP)*/
|
||||
avail = toread < MAX_READ_LIMIT && avail ? avail-1+toread : toread;
|
||||
avail = toread < opts->max_per_read && avail
|
||||
? avail-1+toread
|
||||
: toread;
|
||||
avail = chunk_buffer_prepare_append(b, avail);
|
||||
}
|
||||
|
||||
|
@ -1314,8 +1317,9 @@ handler_t http_response_read(request_st * const r, http_response_opts * const op
|
|||
break;
|
||||
}
|
||||
}
|
||||
} while ((size_t)n == avail);
|
||||
/* else emptied kernel read buffer or partial read */
|
||||
} while (0); /*(extra logic might benefit systems without FIONREAD)*/
|
||||
/*while ((size_t)n == avail && (total += (size_t)n) < opts->max_per_read);*/
|
||||
/* else emptied kernel read buffer or partial read or reached read limit */
|
||||
|
||||
if (buffer_is_blank(b)) chunk_buffer_yield(b); /*(improve large buf reuse)*/
|
||||
|
||||
|
|
|
@ -194,24 +194,16 @@ static int http_chunk_append_cq_to_tempfile(request_st * const r, chunkqueue * c
|
|||
|
||||
/*(inlined by compiler optimizer)*/
|
||||
__attribute_pure__
|
||||
static int http_chunk_uses_tempfile(const request_st * const r, const chunkqueue * const cq, const size_t len) {
|
||||
static int http_chunk_uses_tempfile(const chunkqueue * const cq, const size_t len) {
|
||||
|
||||
/* current usage does not append_mem or append_buffer after appending
|
||||
* file, so not checking if users of this interface have appended large
|
||||
* (references to) files to chunkqueue, which would not be in memory
|
||||
* (but included in calculation for whether or not to use temp file) */
|
||||
|
||||
/*(allow slightly larger mem use if FDEVENT_STREAM_RESPONSE_BUFMIN
|
||||
* to reduce creation of temp files when backend producer will be
|
||||
* blocked until more data is sent to network to client)*/
|
||||
|
||||
const chunk * const c = cq->last;
|
||||
return
|
||||
((c && c->type == FILE_CHUNK && c->file.is_temp)
|
||||
|| chunkqueue_length(cq) + len
|
||||
> ((r->conf.stream_response_body & FDEVENT_STREAM_RESPONSE_BUFMIN)
|
||||
? 128*1024
|
||||
: 64*1024));
|
||||
|| chunkqueue_length(cq) + len > 65536);
|
||||
}
|
||||
|
||||
__attribute_noinline__
|
||||
|
@ -221,7 +213,7 @@ int http_chunk_append_buffer(request_st * const r, buffer * const mem) {
|
|||
|
||||
chunkqueue * const cq = &r->write_queue;
|
||||
|
||||
if (http_chunk_uses_tempfile(r, cq, len)) {
|
||||
if (http_chunk_uses_tempfile(cq, len)) {
|
||||
int rc = http_chunk_append_to_tempfile(r, mem->ptr, len);
|
||||
buffer_clear(mem);
|
||||
return rc;
|
||||
|
@ -246,7 +238,7 @@ int http_chunk_append_mem(request_st * const r, const char * const mem, const si
|
|||
|
||||
chunkqueue * const cq = &r->write_queue;
|
||||
|
||||
if (http_chunk_uses_tempfile(r, cq, len))
|
||||
if (http_chunk_uses_tempfile(cq, len))
|
||||
return http_chunk_append_to_tempfile(r, mem, len);
|
||||
|
||||
if (r->resp_send_chunked)
|
||||
|
@ -265,7 +257,7 @@ int http_chunk_transfer_cqlen(request_st * const r, chunkqueue * const src, cons
|
|||
|
||||
chunkqueue * const cq = &r->write_queue;
|
||||
|
||||
if (http_chunk_uses_tempfile(r, cq, len))
|
||||
if (http_chunk_uses_tempfile(cq, len))
|
||||
return http_chunk_append_cq_to_tempfile(r, src, len);
|
||||
|
||||
if (r->resp_send_chunked)
|
||||
|
|
|
@ -980,6 +980,13 @@ URIHANDLER_FUNC(cgi_is_handled) {
|
|||
hctx->conf.upgrade
|
||||
&& r->http_version == HTTP_VERSION_1_1
|
||||
&& light_btst(r->rqst_htags, HTTP_HEADER_UPGRADE);
|
||||
hctx->opts.max_per_read =
|
||||
!(r->conf.stream_response_body /*(if not streaming response body)*/
|
||||
& (FDEVENT_STREAM_RESPONSE|FDEVENT_STREAM_RESPONSE_BUFMIN))
|
||||
? 262144
|
||||
: (r->conf.stream_response_body & FDEVENT_STREAM_RESPONSE_BUFMIN)
|
||||
? 16384 /* FDEVENT_STREAM_RESPONSE_BUFMIN */
|
||||
: 65536; /* FDEVENT_STREAM_RESPONSE */
|
||||
hctx->opts.fdfmt = S_IFIFO;
|
||||
hctx->opts.backend = BACKEND_CGI;
|
||||
hctx->opts.authorizer = 0;
|
||||
|
|
|
@ -497,7 +497,8 @@ static handler_t fcgi_check_extension(request_st * const r, void *p_d, int uri_p
|
|||
handler_ctx *hctx = r->plugin_ctx[p->id];
|
||||
hctx->opts.backend = BACKEND_FASTCGI;
|
||||
hctx->opts.parse = fcgi_recv_parse;
|
||||
hctx->opts.pdata = hctx;
|
||||
hctx->opts.pdata = hctx; /*(skip +255 for potential padding)*/
|
||||
hctx->opts.max_per_read = sizeof(FCGI_Header)+FCGI_MAX_LENGTH+1;
|
||||
hctx->stdin_append = fcgi_stdin_append;
|
||||
hctx->create_env = fcgi_create_env;
|
||||
if (!hctx->rb) {
|
||||
|
|
|
@ -64,7 +64,6 @@ static int proxy_check_extforward;
|
|||
|
||||
typedef struct {
|
||||
gw_handler_ctx gw;
|
||||
http_response_opts opts;
|
||||
plugin_config conf;
|
||||
} handler_ctx;
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@ enum {
|
|||
};
|
||||
|
||||
typedef struct http_response_opts_t {
|
||||
uint32_t max_per_read;
|
||||
int fdfmt;
|
||||
int backend;
|
||||
int authorizer; /* bool */
|
||||
|
|
Loading…
Reference in New Issue