2019-08-11 06:47:19 +01:00
|
|
|
/*
|
|
|
|
* libwebsockets - small server side websockets and web server implementation
|
|
|
|
*
|
2021-02-01 21:16:25 +00:00
|
|
|
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
|
2019-08-11 06:47:19 +01:00
|
|
|
*
|
2019-08-14 10:44:14 +01:00
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this software and associated documentation files (the "Software"), to
|
|
|
|
* deal in the Software without restriction, including without limitation the
|
|
|
|
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
|
|
|
* sell copies of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
2019-08-11 06:47:19 +01:00
|
|
|
*
|
2019-08-14 10:44:14 +01:00
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
2019-08-11 06:47:19 +01:00
|
|
|
*
|
2019-08-14 10:44:14 +01:00
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
|
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
* IN THE SOFTWARE.
|
2019-08-11 06:47:19 +01:00
|
|
|
*/
|
|
|
|
|
2019-08-15 10:49:52 +01:00
|
|
|
#include "private-lib-core.h"
|
2019-08-11 06:47:19 +01:00
|
|
|
|
2021-09-14 05:33:24 +01:00
|
|
|
#if defined(STANDALONE)
|
|
|
|
#undef lws_malloc
|
|
|
|
#define lws_malloc(a, b) malloc(a)
|
|
|
|
#undef lws_free
|
|
|
|
#define lws_free(a) free(a)
|
|
|
|
#undef lws_free_set_NULL
|
|
|
|
#define lws_free_set_NULL(a) { if (a) { free(a); a = NULL; }}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
2019-08-11 06:47:19 +01:00
|
|
|
/*
 * State carried through the free-list walk in search_best_free(): we are
 * looking for either the smallest hole that fits (best-fit), or, when
 * coalescing is enabled, a hole that starts exactly at the end of the
 * current tail object of the target kind.
 */
struct lws_dsh_search {
	size_t required;	  /* hdr + aligned payload size we must find */
	ssize_t natural_required; /* extra extent needed to grow the tail in
				   * place (coalesce), may differ from
				   * required since no new hdr is needed */
	int kind;		  /* target kind (internal index, ie, +1) */
	lws_dsh_obj_t *best;	  /* best candidate hole seen so far */
	lws_dsh_t *dsh;		  /* dsh that owns the best candidate */
	lws_dsh_obj_t *tail_obj;  /* current tail object of the target kind */
	void *natural; /* coalesce address against last tail */

	lws_dsh_t *already_checked;
	lws_dsh_t *this_dsh;	  /* dsh currently being searched */

	char coalesce;		  /* nonzero: best is the natural hole and we
				   * should grow the tail rather than alloc */
};
|
|
|
|
|
|
|
|
static int
|
|
|
|
_lws_dsh_alloc_tail(lws_dsh_t *dsh, int kind, const void *src1, size_t size1,
|
|
|
|
const void *src2, size_t size2, lws_dll2_t *replace);
|
|
|
|
|
|
|
|
/*
 * Round length up to the next multiple of the pointer size, so every
 * object placed in the dsh buffer starts on a pointer-aligned boundary.
 */
static size_t
lws_dsh_align(size_t length)
{
	const size_t mask = sizeof(int *) - 1;

	/* alignment is a power of two, so masking is equivalent to the
	 * remainder-based adjustment */
	return (length + mask) & ~mask;
}
|
|
|
|
|
2021-09-14 05:33:24 +01:00
|
|
|
void
|
|
|
|
lws_dsh_empty(struct lws_dsh *dsh)
|
|
|
|
{
|
|
|
|
lws_dsh_obj_t *obj;
|
|
|
|
size_t oha_len;
|
|
|
|
int n;
|
|
|
|
|
|
|
|
if (!dsh)
|
|
|
|
return;
|
|
|
|
|
|
|
|
oha_len = sizeof(lws_dsh_obj_head_t) * (unsigned int)dsh->count_kinds;
|
|
|
|
|
|
|
|
/* clear down the obj heads array */
|
|
|
|
|
|
|
|
memset(dsh->oha, 0, oha_len);
|
|
|
|
for (n = 0; n < dsh->count_kinds; n++) {
|
|
|
|
dsh->oha[n].kind = n;
|
|
|
|
dsh->oha[n].total_size = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* initially the whole buffer is on the free kind (0) list */
|
|
|
|
|
|
|
|
obj = (lws_dsh_obj_t *)dsh->buf;
|
|
|
|
memset(obj, 0, sizeof(*obj));
|
|
|
|
obj->asize = dsh->buffer_size - sizeof(*obj);
|
|
|
|
|
|
|
|
lws_dll2_add_head(&obj->list, &dsh->oha[0].owner);
|
|
|
|
|
|
|
|
dsh->locally_free = obj->asize;
|
|
|
|
dsh->locally_in_use = 0;
|
|
|
|
}
|
|
|
|
|
2019-08-11 06:47:19 +01:00
|
|
|
lws_dsh_t *
|
2021-09-04 05:05:21 +01:00
|
|
|
lws_dsh_create(lws_dll2_owner_t *owner, size_t buf_len, int _count_kinds)
|
2019-08-11 06:47:19 +01:00
|
|
|
{
|
2021-09-04 05:05:21 +01:00
|
|
|
int count_kinds = _count_kinds & 0xff;
|
2019-08-11 06:47:19 +01:00
|
|
|
lws_dsh_t *dsh;
|
2021-09-04 05:05:21 +01:00
|
|
|
size_t oha_len;
|
2019-08-11 06:47:19 +01:00
|
|
|
|
2021-09-04 05:05:21 +01:00
|
|
|
oha_len = sizeof(lws_dsh_obj_head_t) * (unsigned int)(++count_kinds);
|
|
|
|
|
2019-08-11 06:47:19 +01:00
|
|
|
assert(buf_len);
|
|
|
|
assert(count_kinds > 1);
|
2020-12-31 15:08:48 +00:00
|
|
|
assert(buf_len > sizeof(lws_dsh_t) + oha_len);
|
2021-02-01 21:16:25 +00:00
|
|
|
buf_len += 64;
|
2019-08-11 06:47:19 +01:00
|
|
|
|
|
|
|
dsh = lws_malloc(sizeof(lws_dsh_t) + buf_len + oha_len, __func__);
|
|
|
|
if (!dsh)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/* set convenience pointers to the overallocated parts */
|
|
|
|
|
2021-09-14 05:33:24 +01:00
|
|
|
lws_dll2_clear(&dsh->list);
|
2019-08-11 06:47:19 +01:00
|
|
|
dsh->oha = (lws_dsh_obj_head_t *)&dsh[1];
|
|
|
|
dsh->buf = ((uint8_t *)dsh->oha) + oha_len;
|
|
|
|
dsh->count_kinds = count_kinds;
|
|
|
|
dsh->buffer_size = buf_len;
|
|
|
|
dsh->being_destroyed = 0;
|
2021-09-04 05:05:21 +01:00
|
|
|
dsh->splitat = 0;
|
|
|
|
dsh->flags = (unsigned int)_count_kinds & 0xff000000u;
|
2019-08-11 06:47:19 +01:00
|
|
|
|
2021-09-14 05:33:24 +01:00
|
|
|
lws_dsh_empty(dsh);
|
2019-08-11 06:47:19 +01:00
|
|
|
|
|
|
|
if (owner)
|
|
|
|
lws_dll2_add_head(&dsh->list, owner);
|
|
|
|
|
|
|
|
// lws_dsh_describe(dsh, "post-init");
|
|
|
|
|
|
|
|
return dsh;
|
|
|
|
}
|
|
|
|
|
2021-09-04 05:05:21 +01:00
|
|
|
/*
|
|
|
|
* We're flicking through the hole list... if we find a suitable hole starting
|
|
|
|
* right after the current tail, it means we can coalesce against the current
|
|
|
|
* tail, that overrides all other considerations
|
|
|
|
*/
|
|
|
|
|
2019-08-11 06:47:19 +01:00
|
|
|
/*
 * lws_dll2 foreach callback walking the free (kind 0) list.
 *
 * Two outcomes are possible:
 *
 *  - coalesce: if coalescing is enabled and this hole starts exactly at the
 *    precomputed "natural" address (the byte after the current tail object
 *    of the target kind) and is big enough, we prefer growing the tail in
 *    place; that overrides best-fit entirely.
 *
 *  - best-fit: otherwise track the smallest hole that can hold s->required.
 *
 * Always returns 0 so the whole list is visited (a later hole can still
 * become the coalesce candidate).
 */
static int
search_best_free(struct lws_dll2 *d, void *user)
{
	struct lws_dsh_search *s = (struct lws_dsh_search *)user;
	lws_dsh_obj_t *obj = lws_container_of(d, lws_dsh_obj_t, list);

	// lwsl_debug("%s: obj %p, asize %zu (req %zu)\n", __func__, obj,
	//		obj->asize, s->required);

	// if (s->tail_obj)
	//	lwsl_notice("%s: tail est %d, splitat %d\n", __func__,
	//	   (int)(s->tail_obj->asize + (size_t)s->natural_required), (int)s->dsh->splitat);

	if (s->dsh->flags & LWS_DSHFLAG_ENABLE_COALESCE) {
		/*
		 * Coalesce only when the hole abuts the tail, it can absorb
		 * the extra extent, and (if splitat is set) the grown tail
		 * would not exceed the split threshold
		 */
		if (obj == s->natural && s->tail_obj &&
		    (int)obj->asize >= s->natural_required
		    &&
		    (!s->dsh->splitat ||
		     (size_t)(s->tail_obj->asize +
			     (size_t)s->natural_required) <= s->dsh->splitat)
		   ) {
			// lwsl_user("%s: found natural\n", __func__);
			s->dsh = s->this_dsh;
			s->best = obj;
			s->coalesce = 1;
		}

		/* once a coalesce candidate is locked in, stop considering
		 * best-fit alternatives for this and later holes */
		if (s->coalesce)
			return 0;
	}

	/* plain best-fit: smallest hole that still fits the request */
	if (obj->asize >= s->required &&
	    (!s->best || obj->asize < s->best->asize)) {
		s->best = obj;
		s->dsh = s->this_dsh;
	}

	return 0;
}
|
|
|
|
|
2021-09-04 05:05:21 +01:00
|
|
|
static int
|
|
|
|
buf_compare(const lws_dll2_t *d, const lws_dll2_t *i)
|
|
|
|
{
|
|
|
|
return (int)lws_ptr_diff(d, i);
|
|
|
|
}
|
|
|
|
|
2019-08-11 06:47:19 +01:00
|
|
|
void
|
|
|
|
lws_dsh_destroy(lws_dsh_t **pdsh)
|
|
|
|
{
|
|
|
|
lws_dsh_t *dsh = *pdsh;
|
|
|
|
|
|
|
|
if (!dsh)
|
|
|
|
return;
|
|
|
|
|
|
|
|
dsh->being_destroyed = 1;
|
|
|
|
|
|
|
|
lws_dll2_remove(&dsh->list);
|
2021-09-14 05:33:24 +01:00
|
|
|
lws_dsh_empty(dsh);
|
2019-08-11 06:47:19 +01:00
|
|
|
|
|
|
|
/* everything else is in one heap allocation */
|
|
|
|
|
|
|
|
lws_free_set_NULL(*pdsh);
|
|
|
|
}
|
|
|
|
|
ss: proxy: get rx flow control working
This fixes the proxy rx flow by adding an lws_dsh helper to hide the
off-by-one in the "kind" array (kind 0 is reserved for tracking the
unallocated dsh blocks).
For testing, it adds a --blob option on minimal-secure-streams[-client]
which uses a streamtype "bulkproxflow" from here
https://warmcat.com/policy/minimal-proxy-v4.2-v2.json
"bulkproxflow": {
"endpoint": "warmcat.com",
"port": 443,
"protocol": "h1",
"http_method": "GET",
"http_url": "blob.bin",
"proxy_buflen": 32768,
"proxy_buflen_rxflow_on_above": 24576,
"proxy_buflen_rxflow_off_below": 8192,
"tls": true,
"retry": "default",
"tls_trust_store": "le_via_dst"
}
This downloads a 51MB blob of random data with the SHA256sum
ed5720c16830810e5829dfb9b66c96b2e24efc4f93aa5e38c7ff4150d31cfbbf
The minimal-secure-streams --blob example client delays the download by
50ms every 10KiB it sees to force rx flow usage at the proxy.
It downloads the whole thing and checks the SHA256 is as expected.
Logs about rxflow status are available at LLL_INFO log level.
2021-04-07 14:25:07 +01:00
|
|
|
size_t
|
|
|
|
lws_dsh_get_size(struct lws_dsh *dsh, int kind)
|
|
|
|
{
|
|
|
|
kind++;
|
|
|
|
assert(kind < dsh->count_kinds);
|
|
|
|
|
|
|
|
return dsh->oha[kind].total_size;
|
|
|
|
}
|
|
|
|
|
2019-08-11 06:47:19 +01:00
|
|
|
/*
 * Core allocator: place size1 bytes from src1 followed by size2 bytes from
 * src2 into a single new object on the tail of the given kind's list (or,
 * when coalescing applies, grow the existing tail object in place).
 *
 * kind is the user kind; it is bumped by 1 internally since kind 0 is
 * reserved for the free list.  If replace is non-NULL, the new object is
 * spliced into that list position instead of being appended.
 *
 * Returns 0 on success, 1 if no hole big enough exists.
 */
static int
_lws_dsh_alloc_tail(lws_dsh_t *dsh, int kind, const void *src1, size_t size1,
		    const void *src2, size_t size2, lws_dll2_t *replace)
{
	/* full footprint: object header plus pointer-aligned payload */
	size_t asize = sizeof(lws_dsh_obj_t) + lws_dsh_align(size1 + size2);
	struct lws_dsh_search s;

	assert(kind >= 0);
	kind++;
	/* NOTE(review): this assert tolerates dsh == NULL but dsh is
	 * dereferenced unconditionally below (s.tail_obj setup) — confirm
	 * callers can never pass NULL here */
	assert(!dsh || kind < dsh->count_kinds);

	/*
	 * Search our free list looking for the smallest guy who will fit
	 * what we want to allocate
	 */
	s.dsh = dsh;
	s.required = asize;
	s.kind = kind;
	s.best = NULL;
	s.already_checked = NULL;
	s.this_dsh = dsh;
	s.natural = NULL;
	s.coalesce = 0;
	s.natural_required = 0;
	/* list is at the very start, so we can cast */
	s.tail_obj = (lws_dsh_obj_t *)dsh->oha[kind].owner.tail;

	if (s.tail_obj) {

		assert(s.tail_obj->kind == kind);

		/*
		 * there's a tail... precompute where a natural hole would
		 * have to start to be coalescable
		 */
		s.natural = (uint8_t *)s.tail_obj + s.tail_obj->asize;
		/*
		 * ... and precompute the needed hole extent (including its
		 * obj part we would no longer need if we coalesced, and
		 * accounting for any unused / alignment part in the tail
		 */
		s.natural_required = (ssize_t)(lws_dsh_align(s.tail_obj->size + size1 + size2) -
				s.tail_obj->asize + sizeof(lws_dsh_obj_t));

		// lwsl_notice("%s: natural %p, tail len %d, nreq %d, splitat %d\n", __func__, s.natural,
		//		(int)s.tail_obj->size, (int)s.natural_required, (int)dsh->splitat);
	}

	if (dsh && !dsh->being_destroyed)
		lws_dll2_foreach_safe(&dsh->oha[0].owner, &s, search_best_free);

	if (!s.best) {
		//lwsl_notice("%s: no buffer has space for %lu\n",
		//	    __func__, (unsigned long)asize);

		/* no hole fits: caller must retry later / apply flow control */
		return 1;
	}

	if (s.coalesce) {
		/* nf: append point, just past the tail object's payload */
		uint8_t *nf = (uint8_t *)&s.tail_obj[1] + s.tail_obj->size,
			*e = (uint8_t *)s.best + s.best->asize, *ce;
		lws_dsh_obj_t *rh;
		size_t le;

		// lwsl_notice("%s: coalescing\n", __func__);

		/*
		 * logically remove the free list entry we're taking over the
		 * memory footprint of
		 */
		lws_dll2_remove(&s.best->list);
		s.dsh->locally_free -= s.best->asize;
		if (s.dsh->oha[kind].total_size < s.tail_obj->asize) {
			lwsl_err("%s: total_size %d, asize %d, hdr size %d\n", __func__,
					(int)s.dsh->oha[kind].total_size,
					(int)s.tail_obj->asize, (int)sizeof(lws_dsh_obj_t));

			assert(0);
		}
		/* temporarily retire the tail's accounting; re-added below
		 * with its grown size */
		s.dsh->oha[kind].total_size -= s.tail_obj->asize;
		s.dsh->locally_in_use -= s.tail_obj->asize;

		if (size1) {
			memcpy(nf, src1, size1);
			nf += size1;
		}
		if (size2) {
			memcpy(nf, src2, size2);
			nf += size2;
		}

		/*
		 * adjust the tail guy's sizes to account for the coalesced
		 * data and alignment for the end point
		 */

		s.tail_obj->size = s.tail_obj->size + size1 + size2;
		s.tail_obj->asize = sizeof(lws_dsh_obj_t) +
					lws_dsh_align(s.tail_obj->size);

		ce = (uint8_t *)s.tail_obj + s.tail_obj->asize;
		assert(ce <= e);
		le = lws_ptr_diff_size_t(e, ce);

		/*
		 * Now we have to decide what to do with any leftovers...
		 */

		if (le < 64)
			/*
			 * just absorb it into the coalesced guy as spare, and
			 * no need for a replacement hole
			 */
			s.tail_obj->asize += le;
		else {
			/* big enough to be worth tracking as a new hole */
			rh = (lws_dsh_obj_t *)ce;

			memset(rh, 0, sizeof(*rh));
			rh->asize = le;
			lws_dll2_add_sorted(&rh->list, &s.dsh->oha[0].owner,
					    buf_compare);
			s.dsh->locally_free += rh->asize;
		}

		s.dsh->oha[kind].total_size += s.tail_obj->asize;
		s.dsh->locally_in_use += s.tail_obj->asize;

		return 0;
	}

	/* anything coming out of here must be aligned */
	assert(!(((size_t)(intptr_t)s.best) & (sizeof(int *) - 1)));

	if (s.best->asize < asize + (2 * sizeof(*s.best))) {

		// lwsl_notice("%s: exact\n", __func__);
		/*
		 * Exact fit, or close enough we can't / don't want to have to
		 * track the little bit of free area that would be left.
		 *
		 * Move the object from the free list to the oha of the
		 * desired kind
		 */
		lws_dll2_remove(&s.best->list);
		s.best->dsh = s.dsh;
		s.best->kind = kind;
		s.best->size = size1 + size2;
		memcpy(&s.best[1], src1, size1);
		if (src2)
			memcpy((uint8_t *)&s.best[1] + size1, src2, size2);

		if (replace) {
			/* splice into the exact list position of replace */
			s.best->list.prev = replace->prev;
			s.best->list.next = replace->next;
			s.best->list.owner = replace->owner;
			if (replace->prev)
				replace->prev->next = &s.best->list;
			if (replace->next)
				replace->next->prev = &s.best->list;
		} else
			if (dsh) {
				assert(!(((unsigned long)(intptr_t)(s.best)) &
					 (sizeof(int *) - 1)));
				lws_dll2_add_tail(&s.best->list,
						  &dsh->oha[kind].owner);
			}

		assert(s.dsh->locally_free >= s.best->asize);
		s.dsh->locally_free -= s.best->asize;
		s.dsh->locally_in_use += s.best->asize;
		dsh->oha[kind].total_size += s.best->asize;
		assert(s.dsh->locally_in_use <= s.dsh->buffer_size);
	} else {
		lws_dsh_obj_t *nf;
#if defined(_DEBUG)
		uint8_t *e = ((uint8_t *)s.best) + s.best->asize;
#endif
		/*
		 * Free area was oversize enough that we need to split it.
		 *
		 * Unlink the free area and move its header forward to account
		 * for our usage of its start area.  It's like this so that we
		 * can coalesce sequential objects.
		 */
		//lwsl_notice("%s: splitting... free reduce %zu -> %zu\n",
		//	    __func__, s.best->asize, s.best->asize - asize);

		assert(s.best->asize >= asize);

		/* unlink the entire original hole object at s.best */
		lws_dll2_remove(&s.best->list);
		s.dsh->locally_free -= s.best->asize;
		s.dsh->locally_in_use += asize;

		/* latter part becomes new hole object */

		nf = (lws_dsh_obj_t *)(((uint8_t *)s.best) + asize);

		assert((uint8_t *)nf < e);

		memset(nf, 0, sizeof(*nf));
		nf->asize = s.best->asize - asize; /* rump free part only */

		assert(((uint8_t *)nf) + nf->asize <= e);

		lws_dll2_add_sorted(&nf->list, &s.dsh->oha[0].owner, buf_compare);
		/* NOTE(review): this adds back the FULL original hole size
		 * (s.best->asize is only shrunk to asize below), while only
		 * nf->asize remains free — looks like locally_free is
		 * overcounted by asize here; confirm against upstream */
		s.dsh->locally_free += s.best->asize;

		/* take over s.best as the new allocated object, fill it in */

		s.best->dsh = s.dsh;
		s.best->kind = kind;
		s.best->size = size1 + size2;
		s.best->asize = asize;

		// lwsl_notice("%s: split off kind %d\n", __func__, kind);

		assert((uint8_t *)s.best + s.best->asize < e);
		assert((uint8_t *)s.best + s.best->asize <= (uint8_t *)nf);

		if (size1)
			memcpy(&s.best[1], src1, size1);
		if (src2)
			memcpy((uint8_t *)&s.best[1] + size1, src2, size2);

		if (replace) {
			/* splice into the exact list position of replace */
			s.best->list.prev = replace->prev;
			s.best->list.next = replace->next;
			s.best->list.owner = replace->owner;
			if (replace->prev)
				replace->prev->next = &s.best->list;
			if (replace->next)
				replace->next->prev = &s.best->list;
		} else
			if (dsh) {
				assert(!(((unsigned long)(intptr_t)(s.best)) &
					 (sizeof(int *) - 1)));
				lws_dll2_add_tail(&s.best->list,
						  &dsh->oha[kind].owner);
			}

		assert(s.dsh->locally_free >= asize);
		dsh->oha[kind].total_size += asize;
		assert(s.dsh->locally_in_use <= s.dsh->buffer_size);
	}

	// lws_dsh_describe(dsh, "post-alloc");

	return 0;
}
|
|
|
|
|
|
|
|
int
|
|
|
|
lws_dsh_alloc_tail(lws_dsh_t *dsh, int kind, const void *src1, size_t size1,
|
|
|
|
const void *src2, size_t size2)
|
|
|
|
{
|
2021-09-04 05:05:21 +01:00
|
|
|
int r;
|
|
|
|
|
|
|
|
do {
|
|
|
|
size_t s1 = size1, s2 = size2;
|
|
|
|
|
|
|
|
if (!dsh->splitat || !(dsh->flags & LWS_DSHFLAG_ENABLE_SPLIT)) {
|
|
|
|
s1 = size1;
|
|
|
|
s2 = size2;
|
|
|
|
} else
|
|
|
|
if (s1 > dsh->splitat) {
|
|
|
|
s1 = dsh->splitat;
|
|
|
|
s2 = 0;
|
|
|
|
} else {
|
|
|
|
if (s1 + s2 > dsh->splitat)
|
|
|
|
s2 = dsh->splitat - s1;
|
|
|
|
}
|
|
|
|
r = _lws_dsh_alloc_tail(dsh, kind, src1, s1, src2, s2, NULL);
|
|
|
|
if (r)
|
|
|
|
return r;
|
2021-12-23 06:18:33 +00:00
|
|
|
src1 = (void *)((uint8_t *)src1 + s1);
|
|
|
|
src2 = (void *)((uint8_t *)src2 + s2);
|
2021-09-04 05:05:21 +01:00
|
|
|
size1 -= s1;
|
|
|
|
size2 -= s2;
|
|
|
|
} while (size1 + size2);
|
|
|
|
|
|
|
|
return 0;
|
2019-08-11 06:47:19 +01:00
|
|
|
}
|
|
|
|
|
2021-09-04 05:05:21 +01:00
|
|
|
void
|
|
|
|
lws_dsh_consume(struct lws_dsh *dsh, int kind, size_t len)
|
2019-08-11 06:47:19 +01:00
|
|
|
{
|
2021-09-04 05:05:21 +01:00
|
|
|
lws_dsh_obj_t *h = (lws_dsh_obj_t *)dsh->oha[kind + 1].owner.head;
|
|
|
|
|
|
|
|
assert(len <= h->size);
|
|
|
|
assert(h->pos + len <= h->size);
|
|
|
|
|
|
|
|
if (len == h->size || h->pos + len == h->size) {
|
|
|
|
lws_dsh_free((void **)&h);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(0);
|
|
|
|
|
|
|
|
h->pos += len;
|
2019-08-11 06:47:19 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Free a dsh object given a pointer to its PAYLOAD (not its header): the
 * header is recovered by stepping back sizeof(lws_dsh_obj_t).  The object
 * is moved to the free list of the dsh that owns it, and coalesced with
 * any free hole that directly precedes or follows it in buffer order.
 * *pobj is NULLed.
 */
void
lws_dsh_free(void **pobj)
{
	/* recover the object header sitting just before the payload */
	lws_dsh_obj_t *_o = (lws_dsh_obj_t *)((uint8_t *)(*pobj) - sizeof(*_o)),
			*_o2;
	lws_dsh_t *dsh = _o->dsh;

	/* anything coming out of here must be aligned */
	assert(!(((size_t)(intptr_t)_o) & (sizeof(int *) - 1)));

	/*
	 * Remove the object from its list and place on the free list of the
	 * dsh the buffer space belongs to
	 */

	lws_dll2_remove(&_o->list);
	*pobj = NULL;

	assert(dsh->locally_in_use >= _o->asize);
	dsh->locally_free += _o->asize;
	dsh->locally_in_use -= _o->asize;
	/* _o->kind already includes the internal +1 offset */
	assert(dsh->oha[_o->kind].total_size >= _o->asize);
	dsh->oha[_o->kind].total_size -= _o->asize; /* account for usage by kind */
	assert(dsh->locally_in_use <= dsh->buffer_size);

	/*
	 * The free space list is sorted in buffer address order, so detecting
	 * coalescing opportunities is cheap.  Because the free list should be
	 * continuously tending to reduce by coalescing, the sorting should not
	 * be expensive to maintain.
	 */
	_o->size = 0; /* not meaningful when on free list */
	lws_dll2_add_sorted(&_o->list, &_o->dsh->oha[0].owner, buf_compare);

	/* First check for already-free block at the end we can subsume.
	 * Because the free list is sorted, if there is such a guy he is
	 * already our list.next */

	_o2 = (lws_dsh_obj_t *)_o->list.next;
	if (_o2 && (uint8_t *)_o + _o->asize == (uint8_t *)_o2) {
		/*
		 * since we are freeing _obj, we can coalesce with a
		 * free area immediately ahead of it
		 *
		 *  [ _o (being freed) ][ _o2 (free) ]  ->  [ larger _o ]
		 */
		_o->asize += _o2->asize;

		/* guy next to us was absorbed into us */
		lws_dll2_remove(&_o2->list);
	}

	/* Then check if we can be subsumed by a free block behind us.
	 * Because the free list is sorted, if there is such a guy he is
	 * already our list.prev */

	_o2 = (lws_dsh_obj_t *)_o->list.prev;
	if (_o2 && (uint8_t *)_o2 + _o2->asize == (uint8_t *)_o) {
		/*
		 * since we are freeing obj, we can coalesce it with
		 * the previous free area that abuts it
		 *
		 *  [ _o2 (free) ][ _o (being freed) ]  ->  [ larger _o2 ]
		 */
		_o2->asize += _o->asize;

		/* we were absorbed! */
		lws_dll2_remove(&_o->list);
	}

	// lws_dsh_describe(dsh, "post-alloc");
}
|
|
|
|
|
|
|
|
int
|
|
|
|
lws_dsh_get_head(lws_dsh_t *dsh, int kind, void **obj, size_t *size)
|
|
|
|
{
|
2021-02-19 08:15:10 +00:00
|
|
|
lws_dsh_obj_t *_obj;
|
|
|
|
|
|
|
|
if (!dsh)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
_obj = (lws_dsh_obj_t *)lws_dll2_get_head(&dsh->oha[kind + 1].owner);
|
2019-08-11 06:47:19 +01:00
|
|
|
|
|
|
|
if (!_obj) {
|
|
|
|
*obj = 0;
|
|
|
|
*size = 0;
|
|
|
|
|
|
|
|
return 1; /* there is no head */
|
|
|
|
}
|
|
|
|
|
|
|
|
*obj = (void *)(&_obj[1]);
|
|
|
|
*size = _obj->size;
|
|
|
|
|
|
|
|
/* anything coming out of here must be aligned */
|
2021-02-01 21:16:25 +00:00
|
|
|
assert(!(((unsigned long)(intptr_t)(*obj)) & (sizeof(int *) - 1)));
|
2019-08-11 06:47:19 +01:00
|
|
|
|
|
|
|
return 0; /* we returned the head */
|
|
|
|
}
|
|
|
|
|
2021-03-15 12:12:16 +00:00
|
|
|
#if defined(_DEBUG) && !defined(LWS_WITH_NO_LOGS)
|
2019-08-11 06:47:19 +01:00
|
|
|
|
|
|
|
static int
|
|
|
|
describe_kind(struct lws_dll2 *d, void *user)
|
|
|
|
{
|
|
|
|
lws_dsh_obj_t *obj = lws_container_of(d, lws_dsh_obj_t, list);
|
|
|
|
|
2021-09-04 05:05:21 +01:00
|
|
|
lwsl_notice(" _obj %p - %p, dsh %p, size %zu, asize %zu\n",
|
2019-08-11 06:47:19 +01:00
|
|
|
obj, (uint8_t *)obj + obj->asize,
|
|
|
|
obj->dsh, obj->size, obj->asize);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
lws_dsh_describe(lws_dsh_t *dsh, const char *desc)
|
|
|
|
{
|
|
|
|
int n = 0;
|
|
|
|
|
2021-09-04 05:05:21 +01:00
|
|
|
lwsl_notice("%s: dsh %p, bufsize %zu, kinds %d, lf: %zu, liu: %zu, %s\n",
|
2019-08-11 06:47:19 +01:00
|
|
|
__func__, dsh, dsh->buffer_size, dsh->count_kinds,
|
|
|
|
dsh->locally_free, dsh->locally_in_use, desc);
|
|
|
|
|
|
|
|
for (n = 0; n < dsh->count_kinds; n++) {
|
2021-09-04 05:05:21 +01:00
|
|
|
lwsl_notice(" Kind %d:\n", n);
|
2019-08-11 06:47:19 +01:00
|
|
|
lws_dll2_foreach_safe(&dsh->oha[n].owner, dsh, describe_kind);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|