/*
 * version 1.5.2.2, 2026/02/11 13:33:22
 *
 * All of the documentation and software included in the ELWIX and AITNET
 * Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
 *
 * Copyright 2004 - 2026
 *	by Michael Pounov <misho@elwix.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 */
|
Line 138 rbuf_isempty(ringbuf_t *rbuf)
|
Line 138 rbuf_isempty(ringbuf_t *rbuf)
|
| int |
int |
| rbuf_isfull(ringbuf_t *rbuf) |
rbuf_isfull(ringbuf_t *rbuf) |
| { |
{ |
| |
int h, t; |
| |
|
| if (!rbuf) |
if (!rbuf) |
| return -1; |
return -1; |
| if (!rbuf->rb_bufnum) |
if (!rbuf->rb_bufnum) |
| return 1; |
return 1; |
| |
|
| return (((atomic_load_explicit((atomic_int*) &rbuf->rb_head, memory_order_relaxed) + 1) % rbuf->rb_bufnum) == | t = atomic_load_explicit((atomic_int*) &rbuf->rb_tail, memory_order_acquire); |
| atomic_load_explicit((atomic_int*) &rbuf->rb_tail, memory_order_acquire)); | h = atomic_load_explicit((atomic_int*) &rbuf->rb_head, memory_order_relaxed) + 1; |
| | if (h >= rbuf->rb_bufnum) |
| | h ^= h; |
| | |
| | return (h == t); |
| } |
} |
| |
|
| /* |
/* |
|
Line 189 rbuf_enqueue(ringbuf_t *rbuf, void *data, size_t len)
|
Line 195 rbuf_enqueue(ringbuf_t *rbuf, void *data, size_t len)
|
| * return: -1 error, 1 buffer is empty or 0 ok |
* return: -1 error, 1 buffer is empty or 0 ok |
| */ |
*/ |
| int |
int |
| rbuf_dequeue(ringbuf_t *rbuf, struct iovec *out) | rbuf_dequeue(ringbuf_t *rbuf, struct iovec **out) |
| { |
{ |
| int h, t, n; |
int h, t, n; |
| |
|
|
Line 206 rbuf_dequeue(ringbuf_t *rbuf, struct iovec *out)
|
Line 212 rbuf_dequeue(ringbuf_t *rbuf, struct iovec *out)
|
| return 1; |
return 1; |
| |
|
| if (out) |
if (out) |
| *out = rbuf->rb_buffer[t]; | *out = rbuf->rb_buffer + t; |
| |
|
| atomic_store_explicit((atomic_int*) &rbuf->rb_tail, n, memory_order_release); |
atomic_store_explicit((atomic_int*) &rbuf->rb_tail, n, memory_order_release); |
| |
return 0; |
| |
} |
| |
|
| |
|
| |
/* |
| |
* lrb_init() - Init linear ring buffer |
| |
* |
| |
* @lrb = Linear ring buffer |
| |
* @size = Size of ring buffer |
| |
* return: -1 error or 0 ok |
| |
*/ |
| |
int |
| |
lrb_init(lrbuf_t *lrb, u_int size) |
| |
{ |
| |
if (!lrb) |
| |
return -1; |
| |
|
| |
atomic_store_explicit((atomic_int*) &lrb->lrb_head, 0, memory_order_relaxed); |
| |
atomic_store_explicit((atomic_int*) &lrb->lrb_tail, 0, memory_order_relaxed); |
| |
|
| |
lrb->lrb_data = e_malloc(size); |
| |
if (!lrb->lrb_data) |
| |
return -1; |
| |
else |
| |
lrb->lrb_size = size; |
| |
memset(lrb->lrb_data, 0, lrb->lrb_size); |
| |
|
| |
return 0; |
| |
} |
| |
|
| |
/* |
| |
* lrb_free() - Free linear ring buffer |
| |
* |
| |
* @lrb = Linear ring buffer |
| |
* return: none |
| |
*/ |
| |
void |
| |
lrb_free(lrbuf_t *lrb) |
| |
{ |
| |
if (!lrb) |
| |
return; |
| |
|
| |
if (lrb->lrb_data) { |
| |
e_free(lrb->lrb_data); |
| |
lrb->lrb_data = NULL; |
| |
lrb->lrb_size = 0; |
| |
} |
| |
|
| |
atomic_store_explicit((atomic_int*) &lrb->lrb_head, 0, memory_order_relaxed); |
| |
atomic_store_explicit((atomic_int*) &lrb->lrb_tail, 0, memory_order_relaxed); |
| |
} |
| |
|
| |
/* |
| |
* lrb_purge() - Purge all buffer |
| |
* |
| |
* @lrb = Linear ring buffer |
| |
* return: none |
| |
*/ |
| |
void |
| |
lrb_purge(lrbuf_t *lrb) |
| |
{ |
| |
if (!lrb) |
| |
return; |
| |
|
| |
if (lrb->lrb_data) |
| |
memset(lrb->lrb_data, 0, lrb->lrb_size); |
| |
|
| |
atomic_store_explicit((atomic_int*) &lrb->lrb_head, 0, memory_order_relaxed); |
| |
atomic_store_explicit((atomic_int*) &lrb->lrb_tail, 0, memory_order_relaxed); |
| |
atomic_store_explicit((atomic_int*) &lrb->lrb_full, 0, memory_order_relaxed); |
| |
} |
| |
|
| |
/* |
| |
* lrb_isempty() - Check buffer is empty |
| |
* |
| |
* @lrb = Linear ring buffer |
| |
* return: -1 error, 0 it isn't empty |
| |
*/ |
| |
int |
| |
lrb_isempty(lrbuf_t *lrb) |
| |
{ |
| |
if (!lrb) |
| |
return -1; |
| |
|
| |
return (!atomic_load_explicit((atomic_int*) &lrb->lrb_full, memory_order_acquire) && |
| |
(atomic_load_explicit((atomic_int*) &lrb->lrb_head, memory_order_acquire) == |
| |
atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire))); |
| |
} |
| |
|
| |
/* |
| |
* lrb_isfull() - Check buffer is full |
| |
* |
| |
* @lrb = Linear ring buffer |
| |
* return: -1 error or 0 it isn't full |
| |
*/ |
| |
int |
| |
lrb_isfull(lrbuf_t *lrb) |
| |
{ |
| |
int h, t; |
| |
|
| |
if (!lrb) |
| |
return -1; |
| |
if (!lrb->lrb_size) |
| |
return 1; |
| |
|
| |
if (!atomic_load_explicit((atomic_int*) &lrb->lrb_full, memory_order_acquire)) |
| |
return 0; |
| |
|
| |
t = atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire); |
| |
h = atomic_load_explicit((atomic_int*) &lrb->lrb_head, memory_order_acquire); |
| |
return (h == t); |
| |
} |
| |
|
| |
/* |
| |
* lrb_getw() - Get address for write |
| |
* |
| |
* @lrb = Linear ring buffer |
| |
* @len = Return available buffer length for write |
| |
* return: NULL error or !=NULL pointer for write |
| |
* remark: After use of lrb_getw() and write to pointer. |
| |
* You should update ring buffer with lrb_enqueue(,NULL,wrote_len,) |
| |
*/ |
| |
void * |
| |
lrb_getw(lrbuf_t *lrb, size_t *len) |
| |
{ |
| |
int h; |
| |
|
| |
if (!lrb || !lrb->lrb_data || !lrb->lrb_size) |
| |
return NULL; |
| |
|
| |
h = atomic_load_explicit((atomic_int*) &lrb->lrb_head, memory_order_relaxed); |
| |
if (len) |
| |
*len = lrb->lrb_size - h; |
| |
|
| |
return (lrb->lrb_data + h); |
| |
} |
| |
|
| |
/*
 * lrb_enqueue() - Enqueue data to buffer
 *
 * @lrb = Linear ring buffer
 * @data = Data; if =NULL only the head index is advanced
 *	(commits a write done through lrb_getw())
 * @len = Length
 * @lost = Permit to lost data (overwrite oldest queued bytes if needed)
 * return: -1 error, 1 buffer is full or 0 ok
 */
int
lrb_enqueue(lrbuf_t *lrb, void *data, size_t len, int lost)
{
	int h, t = 0, n, t2 = 0, unused, drop = 0;

	if (!lrb || !lrb->lrb_data)
		return -1;
	/* a single enqueue may never cover the whole buffer: head would
	 * lap tail and the full flag could no longer disambiguate */
	if (!lrb->lrb_size || lrb->lrb_size <= len)
		return 1;

	/* project macro; appears to store the free byte count in 'unused'
	 * (inferred from its use below -- verify against its definition) */
	lrb_unused(lrb, unused);
	if (!lost) {
		/* NOTE(review): size_t len vs int unused compares as
		 * unsigned; assumes unused >= 0 here -- confirm */
		if (len > unused)
			return 1;
	} else {
		/* overwrite mode: drop exactly the shortfall of free space */
		drop = len - unused;
		if(drop < 0)
			drop ^= drop;	/* i.e. drop = 0: enough room, nothing to drop */
	}

	/* pre-compute where tail must move to make room for the dropped bytes */
	if (drop > 0) {
		t = atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire);
		t2 = (t + drop) % lrb->lrb_size;
	}
	h = atomic_load_explicit((atomic_int*) &lrb->lrb_head, memory_order_relaxed);
	n = lrb->lrb_size - h;	/* contiguous room before the wrap point */
	if (len < n) {
		/* fits without wrapping */
		if (data)
			memcpy(lrb->lrb_data + h, data, len);
		n = h + len;	/* new head */
	} else {
		/* wraps: split the copy across the end of the storage
		 * (NOTE(review): void* arithmetic on 'data' is a GNU extension) */
		if (data) {
			memcpy(lrb->lrb_data + h, data, n);
			memcpy(lrb->lrb_data, data + n, len - n);
		}
		n = len - n;	/* new head, already wrapped */
	}

	/* publish the new head (release pairs with readers' acquire loads) */
	h = n;
	atomic_store_explicit((atomic_int*) &lrb->lrb_head, h, memory_order_release);
	if (drop > 0)
		/* CAS-retry until our tail advance wins against concurrent
		 * dequeuers; recompute the target from the tail we observed */
		while (42) {
			n = t;
			if (atomic_compare_exchange_weak_explicit((atomic_int*) &lrb->lrb_tail,
			    &n, t2, memory_order_release, memory_order_relaxed))
				break;
			t = n;
			t2 = (t + drop) % lrb->lrb_size;
		}
	else
		t2 = atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire);

	/* buffer is exactly full when head has caught up with tail */
	atomic_store_explicit((atomic_int*) &lrb->lrb_full, (h == t2), memory_order_release);

	return 0;
}
| |
|
| |
/* |
| |
* lrb_getr() - Get address for read |
| |
* |
| |
* @lrb = Linear ring buffer |
| |
* @len = Return available data length for read |
| |
* return: NULL error or !=NULL pointer for read |
| |
* remark: After use of lrb_getr() and read from pointer. |
| |
* You could update ring buffer with lrb_dequeue(,NULL,read_len) |
| |
*/ |
| |
void * |
| |
lrb_getr(lrbuf_t *lrb, size_t *len) |
| |
{ |
| |
int t; |
| |
|
| |
if (!lrb || !lrb->lrb_data || !lrb->lrb_size) |
| |
return NULL; |
| |
|
| |
t = atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire); |
| |
if (len) |
| |
lrb_queued(lrb, *len); |
| |
|
| |
return (lrb->lrb_data + t); |
| |
} |
| |
|
| |
/*
 * lrb_dequeue() - Dequeue data from buffer
 *
 * @lrb = Linear ring buffer
 * @data = Data, if =NULL, just dequeue data
 * @len = Length of data
 * return: -1 error, 0 buffer is empty or >0 stored data bytes
 */
int
lrb_dequeue(lrbuf_t *lrb, void *data, size_t len)
{
	int h, t, t2, n, l, f;

	if (!lrb)
		return -1;
	if (!lrb->lrb_size || !len || lrb_isempty(lrb))
		return 0;
	/* clamp to size-1: a dequeue of the entire buffer would leave
	 * head == tail with the full flag handling reserved for enqueue */
	if (lrb->lrb_size <= len)
		len = lrb->lrb_size - 1;

	/* lock-free retry loop: snapshot state, copy, then try to commit
	 * the new tail with CAS; a lost race restarts the whole snapshot */
	while (42) {
		t = atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire);
		h = atomic_load_explicit((atomic_int*) &lrb->lrb_head, memory_order_acquire);
		f = atomic_load_explicit((atomic_int*) &lrb->lrb_full, memory_order_acquire);

		/* queued byte count, adjusted for wrap-around */
		l = h - t;
		if (l < 0)
			l += lrb->lrb_size;
		if (!l) {
			/* head == tail: empty unless the full flag is set */
			if (!f)
				return 0;
			l = lrb->lrb_size;
		}
		/* NOTE(review): int l vs size_t len compares as unsigned;
		 * both are expected non-negative here -- confirm */
		if (l > len)
			l = len;

		n = lrb->lrb_size - t;	/* contiguous bytes before the wrap point */
		if (l < n) {
			/* single contiguous copy */
			if (data)
				memcpy(data, lrb->lrb_data + t, l);
			t2 = t + l;	/* new tail */
		} else {
			/* wraps: copy the end of the storage, then the start
			 * (NOTE(review): void* arithmetic is a GNU extension) */
			if (data) {
				memcpy(data, lrb->lrb_data + t, n);
				memcpy(data + n, lrb->lrb_data, l - n);
			}
			t2 = l - n;	/* new tail, already wrapped */
		}

		/* commit only if tail is still the value we snapshotted;
		 * otherwise another dequeuer won and we retry from scratch */
		n = t;
		if (atomic_compare_exchange_weak_explicit((atomic_int*) &lrb->lrb_tail,
		    &n, t2, memory_order_release, memory_order_relaxed)) {
			/* we removed data, so the buffer cannot be full anymore */
			atomic_store_explicit((atomic_int*) &lrb->lrb_full, 0, memory_order_release);
			return l;
		}
	}

	/* not reached: the loop above only exits via return */
	return 0;
}