--- version 1.1.2.1, 2025/09/26 08:58:05
+++ version 1.4.4.2, 2026/02/10 17:24:39

Line 12 terms:

 All of the documentation and software included in the ELWIX and AITNET
 Releases is copyrighted by ELWIX - Sofia/Bulgaria <info@elwix.org>
 
-Copyright 2004 - 2025
+Copyright 2004 - 2026
 	by Michael Pounov <misho@elwix.org>. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without

Line 59 rbuf_init(ringbuf_t *rbuf, int num)

 	if (!rbuf)
 		return -1;
 
-	atomic_store_explicit(&rbuf->rb_head, 0, memory_order_relaxed);
-	atomic_store_explicit(&rbuf->rb_tail, 0, memory_order_relaxed);
+	atomic_store_explicit((atomic_int*) &rbuf->rb_head, 0, memory_order_relaxed);
+	atomic_store_explicit((atomic_int*) &rbuf->rb_tail, 0, memory_order_relaxed);
 
 	rbuf->rb_buffer = e_calloc(num, sizeof(struct iovec));
 	if (!rbuf->rb_buffer)

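This hunk, and the matching ones in rbuf_free() and rbuf_purge() below, change only how rb_head/rb_tail are accessed: every load and store now goes through an explicit (atomic_int*) cast into C11 <stdatomic.h> operations with explicit memory ordering. A minimal sketch of the idiom, assuming the fields are declared as plain int in ringbuf_t (the header is not part of this diff, so the struct below is a hypothetical stand-in):

#include <stdatomic.h>
#include <sys/uio.h>

typedef struct {
	int		rb_head;	/* assumed plain int, accessed atomically via cast */
	int		rb_tail;
	int		rb_bufnum;
	struct iovec	*rb_buffer;
} ringbuf_sketch_t;

static inline void
reset_indices(ringbuf_sketch_t *rb)
{
	/* Same pattern as the diff: cast the plain field to atomic_int*
	 * so the store carries explicit memory ordering. Strictly portable
	 * C11 would declare the fields atomic_int instead of casting. */
	atomic_store_explicit((atomic_int *) &rb->rb_head, 0, memory_order_relaxed);
	atomic_store_explicit((atomic_int *) &rb->rb_tail, 0, memory_order_relaxed);
}
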
Line 90 rbuf_free(ringbuf_t *rbuf)

 		rbuf->rb_bufnum = 0;
 	}
 
-	atomic_store_explicit(&rbuf->rb_head, 0, memory_order_relaxed);
-	atomic_store_explicit(&rbuf->rb_tail, 0, memory_order_relaxed);
+	atomic_store_explicit((atomic_int*) &rbuf->rb_head, 0, memory_order_relaxed);
+	atomic_store_explicit((atomic_int*) &rbuf->rb_tail, 0, memory_order_relaxed);
 }
 
 /*

Line 109 rbuf_purge(ringbuf_t *rbuf)

 	if (rbuf->rb_buffer)
 		memset(rbuf->rb_buffer, 0, rbuf->rb_bufnum * sizeof(struct iovec));
 
-	atomic_store_explicit(&rbuf->rb_head, 0, memory_order_relaxed);
-	atomic_store_explicit(&rbuf->rb_tail, 0, memory_order_relaxed);
+	atomic_store_explicit((atomic_int*) &rbuf->rb_head, 0, memory_order_relaxed);
+	atomic_store_explicit((atomic_int*) &rbuf->rb_tail, 0, memory_order_relaxed);
 }
 
 /*

Line 125 rbuf_isempty(ringbuf_t *rbuf)

 	if (!rbuf)
 		return -1;
 
-	return (atomic_load_explicit(&rbuf->rb_head, memory_order_acquire) ==
-			atomic_load_explicit(&rbuf->rb_tail, memory_order_acquire));
+	return (atomic_load_explicit((atomic_int*) &rbuf->rb_head, memory_order_acquire) ==
+			atomic_load_explicit((atomic_int*) &rbuf->rb_tail, memory_order_acquire));
 }
 
 /*

Line 138 rbuf_isempty(ringbuf_t *rbuf)

 int
 rbuf_isfull(ringbuf_t *rbuf)
 {
+	int h, t;
+
 	if (!rbuf)
 		return -1;
+	if (!rbuf->rb_bufnum)
+		return 1;
 
-	return (((atomic_load_explicit(&rbuf->rb_head, memory_order_relaxed) + 1) % rbuf->rb_bufnum) ==
-			atomic_load_explicit(&rbuf->rb_tail, memory_order_acquire));
+	t = atomic_load_explicit((atomic_int*) &rbuf->rb_tail, memory_order_acquire);
+	h = atomic_load_explicit((atomic_int*) &rbuf->rb_head, memory_order_relaxed) + 1;
+	if (h >= rbuf->rb_bufnum)
+		h ^= h;
+
+	return (h == t);
 }
 
 /*

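Besides the casts, the rewritten rbuf_isfull() gains a guard for rb_bufnum == 0 (the old expression computed % rbuf->rb_bufnum unconditionally, dividing by zero on an unallocated ring) and replaces the modulo with an explicit wrap; h ^= h is just an idiom for h = 0. An equivalent plain-C sketch of the new check (not the library's code):

int
isfull_sketch(int head, int tail, int bufnum)
{
	int h = head + 1;	/* candidate next head slot */

	if (h >= bufnum)
		h = 0;		/* wrap, same effect as the diff's h ^= h */
	return (h == tail);	/* full when the next head would hit the tail */
}
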
Line 159 (old) / 167 (new) rbuf_enqueue(ringbuf_t *rbuf, void *data, size_t len)

 	int h, t, n;
 	struct iovec *iov;
 
-	if (!rbuf)
+	if (!rbuf || !rbuf->rb_buffer)
 		return -1;
+	if (!rbuf->rb_bufnum)
+		return 1;
 
-	h = atomic_load_explicit(&rbuf->rb_head, memory_order_relaxed);
-	t = atomic_load_explicit(&rbuf->rb_tail, memory_order_acquire);
+	h = atomic_load_explicit((atomic_int*) &rbuf->rb_head, memory_order_relaxed);
+	t = atomic_load_explicit((atomic_int*) &rbuf->rb_tail, memory_order_acquire);
 	n = (h + 1) % rbuf->rb_bufnum;
 
 	if (n == t)

Line 173 (old) / 183 (new) rbuf_enqueue(ringbuf_t *rbuf, void *data, size_t len)

 	iov->iov_len = len;
 	iov->iov_base = data;
 
-	atomic_store_explicit(&rbuf->rb_head, n, memory_order_release);
+	atomic_store_explicit((atomic_int*) &rbuf->rb_head, n, memory_order_release);
 	return 0;
 }
 

Line 185 (old) / 195 (new) rbuf_enqueue(ringbuf_t *rbuf, void *data, size_t len)

  * return: -1 error, 1 buffer is empty or 0 ok
  */
 int
-rbuf_dequeue(ringbuf_t *rbuf, struct iovec *out)
+rbuf_dequeue(ringbuf_t *rbuf, struct iovec **out)
 {
 	int h, t, n;
 
-	if (!rbuf)
+	if (!rbuf || !rbuf->rb_buffer)
 		return -1;
+	if (!rbuf->rb_bufnum)
+		return 1;
 
-	h = atomic_load_explicit(&rbuf->rb_head, memory_order_acquire);
-	t = atomic_load_explicit(&rbuf->rb_tail, memory_order_relaxed);
+	h = atomic_load_explicit((atomic_int*) &rbuf->rb_head, memory_order_acquire);
+	t = atomic_load_explicit((atomic_int*) &rbuf->rb_tail, memory_order_relaxed);
 	n = (t + 1) % rbuf->rb_bufnum;
 
 	if (h == t)
 		return 1;
 
 	if (out)
-		out = rbuf->rb_buffer + t;
+		*out = rbuf->rb_buffer + t;
 
-	atomic_store_explicit(&rbuf->rb_tail, n, memory_order_release);
+	atomic_store_explicit((atomic_int*) &rbuf->rb_tail, n, memory_order_release);
+	return 0;
+}
+
+
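The signature change is the user-visible fix in this hunk: out is now a struct iovec **. The old code's out = rbuf->rb_buffer + t; only overwrote the local parameter, so callers never actually received the dequeued element; with *out = ... the caller's pointer is set. A usage sketch for the new signature (the surrounding names are illustrative; return values follow the comment above, -1 error, 1 empty, 0 ok):

static void
dequeue_example(ringbuf_t *rbuf)
{
	struct iovec *iov = NULL;

	switch (rbuf_dequeue(rbuf, &iov)) {
	case 0:
		/* iov now points at the stored slot inside rb_buffer; consume
		 * iov->iov_base / iov->iov_len before later enqueues wrap onto it */
		break;
	case 1:
		/* ring is empty, nothing was dequeued */
		break;
	default:
		/* -1: bad descriptor or unallocated buffer */
		break;
	}
}
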
+/*
+ * lrb_init() - Init linear ring buffer
+ *
+ * @lrb = Linear ring buffer
+ * @size = Size of ring buffer
+ * return: -1 error or 0 ok
+ */
+int
+lrb_init(lrbuf_t *lrb, u_int size)
+{
+	if (!lrb)
+		return -1;
+
+	atomic_store_explicit((atomic_int*) &lrb->lrb_head, 0, memory_order_relaxed);
+	atomic_store_explicit((atomic_int*) &lrb->lrb_tail, 0, memory_order_relaxed);
+
+	lrb->lrb_data = e_malloc(size);
+	if (!lrb->lrb_data)
+		return -1;
+	else
+		lrb->lrb_size = size;
+	memset(lrb->lrb_data, 0, lrb->lrb_size);
+
+	return 0;
+}
+
+/*
+ * lrb_free() - Free linear ring buffer
+ *
+ * @lrb = Linear ring buffer
+ * return: none
+ */
+void
+lrb_free(lrbuf_t *lrb)
+{
+	if (!lrb)
+		return;
+
+	if (lrb->lrb_data) {
+		e_free(lrb->lrb_data);
+		lrb->lrb_data = NULL;
+		lrb->lrb_size = 0;
+	}
+
+	atomic_store_explicit((atomic_int*) &lrb->lrb_head, 0, memory_order_relaxed);
+	atomic_store_explicit((atomic_int*) &lrb->lrb_tail, 0, memory_order_relaxed);
+}
+
+/*
+ * lrb_purge() - Purge all buffer
+ *
+ * @lrb = Linear ring buffer
+ * return: none
+ */
+void
+lrb_purge(lrbuf_t *lrb)
+{
+	if (!lrb)
+		return;
+
+	if (lrb->lrb_data)
+		memset(lrb->lrb_data, 0, lrb->lrb_size);
+
+	atomic_store_explicit((atomic_int*) &lrb->lrb_head, 0, memory_order_relaxed);
+	atomic_store_explicit((atomic_int*) &lrb->lrb_tail, 0, memory_order_relaxed);
+}
+
+/*
+ * lrb_isempty() - Check buffer is empty
+ *
+ * @lrb = Linear ring buffer
+ * return: -1 error, 1 buffer is empty or 0 it isn't empty
+ */
+int
+lrb_isempty(lrbuf_t *lrb)
+{
+	if (!lrb)
+		return -1;
+
+	return (!atomic_load_explicit((atomic_int*) &lrb->lrb_full, memory_order_acquire) &&
+			(atomic_load_explicit((atomic_int*) &lrb->lrb_head, memory_order_acquire) ==
+			atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire)));
+}
+
+/*
+ * lrb_isfull() - Check buffer is full
+ *
+ * @lrb = Linear ring buffer
+ * return: -1 error, 1 buffer is full or 0 it isn't full
+ */
+int
+lrb_isfull(lrbuf_t *lrb)
+{
+	int h, t;
+
+	if (!lrb)
+		return -1;
+	if (!lrb->lrb_size)
+		return 1;
+
+	if (!atomic_load_explicit((atomic_int*) &lrb->lrb_full, memory_order_acquire))
+		return 0;
+
+	t = atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire);
+	h = atomic_load_explicit((atomic_int*) &lrb->lrb_head, memory_order_relaxed) + 1;
+	if (h >= lrb->lrb_size)
+		h ^= h;
+
+	return (h == t);
+}
+
+/*
+ * lrb_enqueue() - Enqueue data to buffer
+ *
+ * @lrb = Linear ring buffer
+ * @data = Data
+ * @len = Length
+ * @lost = Permit to lose data
+ * return: -1 error, 1 buffer is full or 0 ok
+ */
+int
+lrb_enqueue(lrbuf_t *lrb, void *data, size_t len, int lost)
+{
+	int h, t, n, t2, unused, drop = 0;
+
+	if (!lrb || !lrb->lrb_data)
+		return -1;
+	if (!lrb->lrb_size || lrb->lrb_size <= len)
+		return 1;
+
+	lrb_unused(lrb, unused);
+	if (!lost) {
+		if (len > unused)
+			return 1;
+	} else {
+		drop = len - unused;
+		if (drop < 0)
+			drop ^= drop;
+	}
+
+	if (drop > 0) {
+		t = atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire);
+		t2 = (t + drop) % lrb->lrb_size;
+	}
+	h = atomic_load_explicit((atomic_int*) &lrb->lrb_head, memory_order_relaxed);
+	n = lrb->lrb_size - h;
+	if (len < n) {
+		memcpy(lrb->lrb_data + h, data, len);
+		n = h + len;
+	} else {
+		memcpy(lrb->lrb_data + h, data, n);
+		memcpy(lrb->lrb_data, data + n, len - n);
+		n = len - n;
+	}
+
+	h = n;
+	atomic_store_explicit((atomic_int*) &lrb->lrb_head, h, memory_order_release);
+	if (drop > 0)
+		while (42) {
+			n = t;
+			if (atomic_compare_exchange_weak_explicit((atomic_int*) &lrb->lrb_tail,
+					&n, t2, memory_order_release, memory_order_relaxed))
+				break;
+			t = n;
+			t2 = (t + drop) % lrb->lrb_size;
+		}
+	else
+		t2 = atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire);
+	atomic_store_explicit((atomic_int*) &lrb->lrb_full, (h == t2), memory_order_release);
+	return 0;
+}
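lrb_enqueue() fills unused through lrb_unused(lrb, unused) without taking the variable's address, so lrb_unused is presumably a macro from the library's header (which this diff does not show) yielding the writable byte count. A hypothetical reconstruction, consistent with how the value is used above but not guaranteed to match the real macro:

#define lrb_unused_sketch(lrb, u)	do { \
		/* hypothetical: free bytes = tail - head, modulo size, \
		 * and zero once the lrb_full flag is set */ \
		int _h = atomic_load_explicit((atomic_int *) &(lrb)->lrb_head, \
		    memory_order_relaxed); \
		int _t = atomic_load_explicit((atomic_int *) &(lrb)->lrb_tail, \
		    memory_order_acquire); \
		(u) = _t - _h; \
		if ((u) <= 0) \
			(u) += (lrb)->lrb_size; \
		if (atomic_load_explicit((atomic_int *) &(lrb)->lrb_full, \
		    memory_order_acquire)) \
			(u) = 0; \
	} while (0)

With lost nonzero, any shortfall (len - unused) is dropped from the tail: the CAS loop below advances lrb_tail past the oldest bytes so the new data always fits.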
+
+/*
+ * lrb_dequeue() - Dequeue data from buffer
+ *
+ * @lrb = Linear ring buffer
+ * @data = Data, if =NULL, just dequeue data
+ * @len = Length of data
+ * return: -1 error, 0 buffer is empty or >0 stored data bytes
+ */
+int
+lrb_dequeue(lrbuf_t *lrb, void *data, size_t len)
+{
+	int h, t, t2, n, l;
+
+	if (!lrb)
+		return -1;
+	if (!lrb->lrb_size || !len || lrb_isempty(lrb))
+		return 0;
+	if (lrb->lrb_size <= len)
+		len = lrb->lrb_size - 1;
+
+	while (42) {
+		t = atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire);
+		h = atomic_load_explicit((atomic_int*) &lrb->lrb_head, memory_order_acquire);
+
+		l = h - t;
+		if (l < 0)
+			l += lrb->lrb_size;
+		if (!l)
+			return 0;
+		if (l > len)
+			l = len;
+
+		n = lrb->lrb_size - t;
+		if (l < n) {
+			if (data)
+				memcpy(data, lrb->lrb_data + t, l);
+			t2 = t + l;
+		} else {
+			if (data) {
+				memcpy(data, lrb->lrb_data + t, n);
+				memcpy(data + n, lrb->lrb_data, l - n);
+			}
+			t2 = l - n;
+		}
+
+		n = t;
+		if (atomic_compare_exchange_weak_explicit((atomic_int*) &lrb->lrb_tail,
+				&n, t2, memory_order_release, memory_order_relaxed)) {
+			atomic_store_explicit((atomic_int*) &lrb->lrb_full, 0, memory_order_release);
+			return l;
+		}
+	}
 
 	return 0;
 }
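Taken together, the lrb_* additions introduce a second, byte-oriented ("linear") ring buffer beside the existing iovec ring: lrb_enqueue() memcpy()s len bytes in, optionally dropping the oldest bytes when lost is set, and lrb_dequeue() copies up to len bytes out (capped at lrb_size - 1), retrying its tail update with a weak CAS so concurrent dequeues stay consistent. A usage sketch, with sizes and data that are purely illustrative; lrbuf_t and the e_* allocators come from the library's headers:

static void
lrb_example(void)
{
	lrbuf_t lrb;
	char msg[] = "ping";
	char out[16];
	int n;

	if (lrb_init(&lrb, 64) == -1)	/* 64 bytes of ring storage */
		return;

	/* lost = 0: return 1 rather than overwrite unread bytes */
	if (lrb_enqueue(&lrb, msg, sizeof(msg), 0) == 0) {
		n = lrb_dequeue(&lrb, out, sizeof(out));
		if (n > 0) {
			/* n bytes (here sizeof(msg)) were copied into out[] */
		}
	}

	lrb_free(&lrb);
}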