
[core] improve chunk buffer reuse from backends

improve oversized chunk buffer reuse from backends
Branch: master
Author: Glenn Strauss, 1 month ago
Commit: d59d5e59b9
Files changed:
  1. src/chunk.c (14 lines changed)
  2. src/chunk.h (3 lines changed)
  3. src/http-header-glue.c (16 lines changed)
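The new chunk_buffer_yield() added in src/chunk.c (declared in src/chunk.h, called from src/http-header-glue.c) lets a caller hand an oversized-but-currently-blank read buffer back to the oversized-chunk pool instead of keeping the large allocation pinned to one backend stream. A minimal caller-side sketch, assuming a long-lived per-request buffer obtained earlier from chunk_buffer_acquire(); chunk_buffer_yield(), buffer_is_blank() and buffer_append_string_len() are the real APIs used in this diff and in lighttpd's buffer.h, while poll_backend_once() and the raw read() step are hypothetical, for illustration only:

    #include <unistd.h>   /* read() */
    #include "buffer.h"   /* buffer, buffer_append_string_len(), buffer_is_blank() */
    #include "chunk.h"    /* chunk_buffer_acquire(), chunk_buffer_yield() */

    /* hypothetical polling step; *b is a long-lived per-request buffer that
     * was obtained earlier with chunk_buffer_acquire() and is kept around
     * across event-loop iterations */
    static void poll_backend_once (const int fd, buffer * const b)
    {
        char tmp[8192];
        const ssize_t n = read(fd, tmp, sizeof(tmp));
        if (n > 0)
            buffer_append_string_len(b, tmp, (size_t)n);

        /* ... data in b would be parsed/forwarded here; when everything is
         *     consumed, the caller leaves b blank ... */

        /* before parking b until the next event: if b grew oversized while
         * buffering a large burst and is blank again, swap its storage back
         * into the oversized-chunk pool so other requests can reuse it */
        if (buffer_is_blank(b))
            chunk_buffer_yield(b); /*(improve large buf reuse)*/
    }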

src/chunk.c  (14 lines changed)

@@ -137,6 +137,7 @@ static chunk * chunk_pop_oversized(size_t sz) {
 }

 static void chunk_push_oversized(chunk * const c, const size_t sz) {
+    /* XXX: chunk_buffer_yield() may have removed need for list size limit */
     if (chunks_oversized_n < 64 && chunk_buf_sz >= 4096) {
         ++chunks_oversized_n;
         chunk **co = &chunks_oversized;
@@ -166,9 +167,6 @@ static buffer * chunk_buffer_acquire_sz(const size_t sz) {
         }
         else
             c = chunk_init_sz(chunk_buf_sz);
-        /* future: might choose to pop from chunks_oversized, if available
-         * (even if larger than sz) rather than allocating new chunk
-         * (and if doing so, might replace chunks_oversized_n) */
     }
     else {
         c = chunk_pop_oversized(sz);
@@ -213,6 +211,16 @@ void chunk_buffer_release(buffer *b) {
     }
 }
+void chunk_buffer_yield(buffer *b) {
+    if (b->size == (chunk_buf_sz|1)) return;
+
+    buffer * const cb = chunk_buffer_acquire_sz(chunk_buf_sz);
+    buffer tb = *b;
+    *b = *cb;
+    *cb = tb;
+    chunk_buffer_release(cb);
+}
+
 size_t chunk_buffer_prepare_append(buffer * const b, size_t sz) {
     if (sz > buffer_string_space(b)) {
         sz += b->used ? b->used : 1;
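A note on the mechanism: chunk_buffer_yield() leaves the caller's buffer pointer untouched. If b is already the standard chunk size ((chunk_buf_sz|1)) it returns immediately; otherwise it acquires a standard-size buffer from the pool, swaps the two buffer structs, and releases the swapped-out one, so chunk_buffer_release() pushes the oversized storage onto the chunks_oversized list for reuse. A small standalone sketch of that swap idiom, using a simplified struct rather than lighttpd's buffer type:

    struct buf { char *ptr; unsigned int used, size; };

    /* swap storage so the caller's handle (b) stays valid while the oversized
     * allocation moves into cb, which the caller then returns to the pool */
    static void yield_sketch (struct buf * const b, struct buf * const cb)
    {
        struct buf tb = *b; /* temporary copy of the oversized buffer */
        *b  = *cb;          /* b now holds the standard-size storage */
        *cb = tb;           /* cb now holds the oversized storage */
        /* ... hand cb back to the buffer pool here ... */
    }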

src/chunk.h  (3 lines changed)

@@ -60,6 +60,9 @@ buffer * chunk_buffer_acquire(void);
 void chunk_buffer_release(buffer *b);
+
+__attribute_nonnull__
+void chunk_buffer_yield(buffer *b);
 size_t chunk_buffer_prepare_append (buffer *b, size_t sz);
 void chunkqueue_chunk_pool_clear(void);

src/http-header-glue.c  (16 lines changed)

@@ -739,6 +739,7 @@ static int http_response_append_buffer(request_st * const r, buffer * const mem,
     }
     else if (0 == r->resp_body_scratchpad) {
         /*(silently truncate if data exceeds Content-Length)*/
+        buffer_clear(mem);
         return 0;
     }
     else if (simple_accum
@@ -1195,7 +1196,11 @@ handler_t http_response_read(request_st * const r, http_response_opts * const op
             * mod_proxy_handle_subrequest())*/
            fdevent_fdnode_event_clr(r->con->srv->ev, fdn, FDEVENT_IN);
        }
-       if (cqlen >= 65536-1) return HANDLER_GO_ON;
+       if (cqlen >= 65536-1) {
+           if (buffer_is_blank(b))
+               chunk_buffer_yield(b); /*(improve large buf reuse)*/
+           return HANDLER_GO_ON;
+       }
        toread = 65536 - 1 - (unsigned int)cqlen;
        /* Note: heuristic is fuzzy in that it limits how much to read
         * from backend based on how much is pending to write to client.
@@ -1222,6 +1227,8 @@ handler_t http_response_read(request_st * const r, http_response_opts * const op
 #endif
 #endif
        case EINTR:
+           if (buffer_is_blank(b))
+               chunk_buffer_yield(b); /*(improve large buf reuse)*/
            return HANDLER_GO_ON;
        default:
            log_perror(r->conf.errh, __FILE__, __LINE__,
@@ -1240,9 +1247,12 @@ handler_t http_response_read(request_st * const r, http_response_opts * const op
            handler_t rc = opts->parse(r, opts, b, (size_t)n);
            if (rc != HANDLER_GO_ON) return rc;
        } else if (0 == n) {
-           if (!buffer_is_blank(b) && opts->simple_accum) {
+           if (buffer_is_blank(b))
+               chunk_buffer_yield(b); /*(improve large buf reuse)*/
+           else if (opts->simple_accum) {
                /*(flush small reads previously accumulated in b)*/
                int rc = http_response_append_buffer(r, b, 0); /*(0 to flush)*/
+               chunk_buffer_yield(b); /*(improve large buf reuse)*/
                if (__builtin_expect( (0 != rc), 0)) {
                    /* error writing to tempfile;
                     * truncate response or send 500 if nothing sent yet */
@@ -1307,5 +1317,7 @@ handler_t http_response_read(request_st * const r, http_response_opts * const op
    } while ((size_t)n == avail);
    /* else emptied kernel read buffer or partial read */
+   if (buffer_is_blank(b))
+       chunk_buffer_yield(b); /*(improve large buf reuse)*/
    return (!r->resp_body_finished ? HANDLER_GO_ON : HANDLER_FINISHED);
 }
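Taken together, the http_response_read() changes place a chunk_buffer_yield(b) call on each path that parks the read buffer while it is blank: the flow-control early return when the outgoing queue is near 64k, the EINTR retry path, the 0 == n end-of-input path, and the fall-through after the read loop. As the inline comments note, the effect is that a buffer which grew oversized while absorbing a large read burst is returned to the oversized-chunk pool instead of remaining attached to an idle backend connection until the next read.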
