--- libelwix/src/ring.c	2025/09/30 11:38:28	1.4
+++ libelwix/src/ring.c	2026/02/10 17:43:15	1.4.4.3
@@ -3,7 +3,7 @@
 * by Michael Pounov
 *
 * $Author: misho $
-* $Id: ring.c,v 1.4 2025/09/30 11:38:28 misho Exp $
+* $Id: ring.c,v 1.4.4.3 2026/02/10 17:43:15 misho Exp $
 *
 **************************************************************************
 The ELWIX and AITNET software is distributed under the following
@@ -12,7 +12,7 @@ terms:
 All of the documentation and software included in the ELWIX and AITNET
 Releases is copyrighted by ELWIX - Sofia/Bulgaria
 
-Copyright 2004 - 2025
+Copyright 2004 - 2026
 by Michael Pounov . All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -138,13 +138,19 @@ rbuf_isempty(ringbuf_t *rbuf)
 int
 rbuf_isfull(ringbuf_t *rbuf)
 {
+    int h, t;
+
     if (!rbuf)
        return -1;
     if (!rbuf->rb_bufnum)
        return 1;
 
-    return (((atomic_load_explicit((atomic_int*) &rbuf->rb_head, memory_order_relaxed) + 1) % rbuf->rb_bufnum) ==
-           atomic_load_explicit((atomic_int*) &rbuf->rb_tail, memory_order_acquire));
+    t = atomic_load_explicit((atomic_int*) &rbuf->rb_tail, memory_order_acquire);
+    h = atomic_load_explicit((atomic_int*) &rbuf->rb_head, memory_order_relaxed) + 1;
+    if (h >= rbuf->rb_bufnum)
+        h ^= h;
+
+    return (h == t);
 }
 
 /*
@@ -209,5 +215,236 @@ rbuf_dequeue(ringbuf_t *rbuf, struct iovec **out)
     *out = rbuf->rb_buffer + t;
     atomic_store_explicit((atomic_int*) &rbuf->rb_tail, n, memory_order_release);
 
     return 0;
 }
+
+
+/*
+ * lrb_init() - Init linear ring buffer
+ *
+ * @lrb = Linear ring buffer
+ * @size = Size of ring buffer
+ * return: -1 error or 0 ok
+ */
+int
+lrb_init(lrbuf_t *lrb, u_int size)
+{
+    if (!lrb)
+        return -1;
+
+    atomic_store_explicit((atomic_int*) &lrb->lrb_head, 0, memory_order_relaxed);
+    atomic_store_explicit((atomic_int*) &lrb->lrb_tail, 0, memory_order_relaxed);
+
+    lrb->lrb_data = e_malloc(size);
+    if (!lrb->lrb_data)
+        return -1;
+    else
+        lrb->lrb_size = size;
+    memset(lrb->lrb_data, 0, lrb->lrb_size);
+
+    return 0;
+}
+
+/*
+ * lrb_free() - Free linear ring buffer
+ *
+ * @lrb = Linear ring buffer
+ * return: none
+ */
+void
+lrb_free(lrbuf_t *lrb)
+{
+    if (!lrb)
+        return;
+
+    if (lrb->lrb_data) {
+        e_free(lrb->lrb_data);
+        lrb->lrb_data = NULL;
+        lrb->lrb_size = 0;
+    }
+
+    atomic_store_explicit((atomic_int*) &lrb->lrb_head, 0, memory_order_relaxed);
+    atomic_store_explicit((atomic_int*) &lrb->lrb_tail, 0, memory_order_relaxed);
+}
+
+/*
+ * lrb_purge() - Purge all data from buffer
+ *
+ * @lrb = Linear ring buffer
+ * return: none
+ */
+void
+lrb_purge(lrbuf_t *lrb)
+{
+    if (!lrb)
+        return;
+
+    if (lrb->lrb_data)
+        memset(lrb->lrb_data, 0, lrb->lrb_size);
+
+    atomic_store_explicit((atomic_int*) &lrb->lrb_head, 0, memory_order_relaxed);
+    atomic_store_explicit((atomic_int*) &lrb->lrb_tail, 0, memory_order_relaxed);
+    atomic_store_explicit((atomic_int*) &lrb->lrb_full, 0, memory_order_relaxed);
+}
+
+/*
+ * lrb_isempty() - Check buffer is empty
+ *
+ * @lrb = Linear ring buffer
+ * return: -1 error, 1 buffer is empty or 0 it isn't empty
+ */
+int
+lrb_isempty(lrbuf_t *lrb)
+{
+    if (!lrb)
+        return -1;
+
+    return (!atomic_load_explicit((atomic_int*) &lrb->lrb_full, memory_order_acquire) &&
+            (atomic_load_explicit((atomic_int*) &lrb->lrb_head, memory_order_acquire) ==
+             atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire)));
+}
+
+/*
+ * lrb_isfull() - Check buffer is full
+ *
+ * @lrb = Linear ring buffer
+ * return: -1 error, 1 buffer is full or 0 it isn't full
+ */
+int
+lrb_isfull(lrbuf_t *lrb)
+{
+    int h, t;
+
+    if (!lrb)
+        return -1;
+    if (!lrb->lrb_size)
+        return 1;
+
+    if (!atomic_load_explicit((atomic_int*) &lrb->lrb_full, memory_order_acquire))
+        return 0;
+
+    t = atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire);
+    h = atomic_load_explicit((atomic_int*) &lrb->lrb_head, memory_order_acquire);
+    return (h == t);
+}
+
+/*
+ * lrb_enqueue() - Enqueue data to buffer
+ *
+ * @lrb = Linear ring buffer
+ * @data = Data
+ * @len = Length
+ * @lost = Permit to drop (lose) oldest data when free space is not enough
+ * return: -1 error, 1 buffer is full (not enough space) or 0 ok
+ */
+int
+lrb_enqueue(lrbuf_t *lrb, void *data, size_t len, int lost)
+{
+    int h, t, n, t2, unused, drop = 0;
+
+    if (!lrb || !lrb->lrb_data)
+        return -1;
+    if (!lrb->lrb_size || lrb->lrb_size <= len)
+        return 1;
+
+    lrb_unused(lrb, unused);
+    if (!lost) {
+        if (len > unused)
+            return 1;
+    } else {
+        drop = len - unused;
+        if (drop < 0)
+            drop ^= drop;
+    }
+
+    if (drop > 0) {
+        t = atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire);
+        t2 = (t + drop) % lrb->lrb_size;
+    }
+    h = atomic_load_explicit((atomic_int*) &lrb->lrb_head, memory_order_relaxed);
+    n = lrb->lrb_size - h;
+    if (len < n) {
+        memcpy(lrb->lrb_data + h, data, len);
+        n = h + len;
+    } else {
+        memcpy(lrb->lrb_data + h, data, n);
+        memcpy(lrb->lrb_data, data + n, len - n);
+        n = len - n;
+    }
+
+    h = n;
+    atomic_store_explicit((atomic_int*) &lrb->lrb_head, h, memory_order_release);
+    if (drop > 0)
+        while (42) {
+            n = t;
+            if (atomic_compare_exchange_weak_explicit((atomic_int*) &lrb->lrb_tail,
+                    &n, t2, memory_order_release, memory_order_relaxed))
+                break;
+            t = n;
+            t2 = (t + drop) % lrb->lrb_size;
+        }
+    else
+        t2 = atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire);
+    atomic_store_explicit((atomic_int*) &lrb->lrb_full, (h == t2), memory_order_release);
+    return 0;
+}
+
+/*
+ * lrb_dequeue() - Dequeue data from buffer
+ *
+ * @lrb = Linear ring buffer
+ * @data = Data, if =NULL just discard dequeued data
+ * @len = Length of data
+ * return: -1 error, 0 buffer is empty or >0 dequeued data bytes
+ */
+int
+lrb_dequeue(lrbuf_t *lrb, void *data, size_t len)
+{
+    int h, t, t2, n, l, f;
+
+    if (!lrb)
+        return -1;
+    if (!lrb->lrb_size || !len || lrb_isempty(lrb))
+        return 0;
+    if (lrb->lrb_size <= len)
+        len = lrb->lrb_size - 1;
+
+    while (42) {
+        t = atomic_load_explicit((atomic_int*) &lrb->lrb_tail, memory_order_acquire);
+        h = atomic_load_explicit((atomic_int*) &lrb->lrb_head, memory_order_acquire);
+        f = atomic_load_explicit((atomic_int*) &lrb->lrb_full, memory_order_acquire);
+
+        l = h - t;
+        if (l < 0)
+            l += lrb->lrb_size;
+        if (!l) {
+            if (!f)
+                return 0;
+            l = lrb->lrb_size;
+        }
+        if (l > len)
+            l = len;
+
+        n = lrb->lrb_size - t;
+        if (l < n) {
+            if (data)
+                memcpy(data, lrb->lrb_data + t, l);
+            t2 = t + l;
+        } else {
+            if (data) {
+                memcpy(data, lrb->lrb_data + t, n);
+                memcpy(data + n, lrb->lrb_data, l - n);
+            }
+            t2 = l - n;
+        }
+
+        n = t;
+        if (atomic_compare_exchange_weak_explicit((atomic_int*) &lrb->lrb_tail,
+                &n, t2, memory_order_release, memory_order_relaxed)) {
+            atomic_store_explicit((atomic_int*) &lrb->lrb_full, 0, memory_order_release);
+            return l;
+        }
+    }

+    return 0;
+}
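
One design point in the new code deserves a note. The rbuf_* ring earlier in this file always keeps one slot of headroom, so head == tail can only mean "empty" and rbuf_isfull() reports full once head + 1 (wrapped) reaches tail. The linear ring instead may be filled completely: lrb_enqueue() records the head/tail collision in lrb_full, lrb_isempty() and lrb_isfull() consult that flag to tell the two cases apart, and lrb_dequeue() clears it again after a successful tail update. The toy model below (plain ints, no atomics, names invented for illustration) only demonstrates that convention; it is not library code.

/* Toy model of the empty/full convention used by the lrb_* code above;
 * the struct and function names here are illustrative, not from libelwix. */
#include <stdio.h>

struct toy {
    int head, tail, size, full;
};

static int toy_isempty(const struct toy *t) { return !t->full && t->head == t->tail; }
static int toy_isfull(const struct toy *t)  { return  t->full && t->head == t->tail; }

int
main(void)
{
    struct toy t = { 0, 0, 8, 0 };      /* fresh buffer: head == tail, full = 0 */
    printf("fresh:   empty=%d full=%d\n", toy_isempty(&t), toy_isfull(&t));

    t.head = t.tail = 3;                /* writer wrapped around and caught the reader */
    t.full = 1;                         /* ... so the enqueue side raises the flag */
    printf("stuffed: empty=%d full=%d\n", toy_isempty(&t), toy_isfull(&t));

    t.full = 0;                         /* a successful dequeue clears it */
    printf("drained: empty=%d full=%d\n", toy_isempty(&t), toy_isfull(&t));
    return 0;
}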
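
For readers who want to exercise the new lrb_* API, a minimal usage sketch follows. It is not part of the patch: the prototypes and return conventions are taken from the diff, but the header name (assumed here to be <elwix.h>) and the caller-owned lrbuf_t on the stack are guesses about how the rest of libelwix exposes these declarations.

/* Usage sketch, NOT from the patch; assumes the lrb_* prototypes and
 * lrbuf_t come from the libelwix header, guessed here as <elwix.h>. */
#include <stdio.h>
#include <elwix.h>

int
main(void)
{
    lrbuf_t lrb;
    char out[16];
    int n;

    if (lrb_init(&lrb, 64) == -1)       /* 64 byte backing store via e_malloc() */
        return 1;

    /* lost=0: refuse the write instead of overwriting unread bytes */
    lrb_enqueue(&lrb, "hello ", 6, 0);
    lrb_enqueue(&lrb, "world", 5, 0);

    /* drain up to sizeof(out) - 1 bytes starting at the tail */
    n = lrb_dequeue(&lrb, out, sizeof(out) - 1);
    if (n > 0) {
        out[n] = '\0';
        printf("dequeued %d bytes: %s\n", n, out);
    }

    lrb_purge(&lrb);                    /* zero storage, reset head/tail/full */
    lrb_free(&lrb);                     /* release the backing memory */
    return 0;
}

Calling lrb_enqueue() with lost != 0 instead lets it advance lrb_tail and overwrite the oldest bytes when the write does not fit, which is why that path updates the tail with a compare-and-swap rather than a plain store.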